diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index f559f704d5..18ce3e3474 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -4,7 +4,7 @@ steps: provider: "gcp" env: TEST_SUITE: "{{ matrix.suite }}" - STACK_VERSION: 8.16.0-SNAPSHOT + STACK_VERSION: 8.18.0-SNAPSHOT WORKSPACE: /tmp/go-elasticsearch matrix: setup: diff --git a/.github/workflows/test-api.yml b/.github/workflows/test-api.yml index 92aa7bbef6..7c164f7b42 100644 --- a/.github/workflows/test-api.yml +++ b/.github/workflows/test-api.yml @@ -11,7 +11,7 @@ jobs: test-free: name: Free env: - ELASTICSEARCH_VERSION: elasticsearch:8.16.0-SNAPSHOT + ELASTICSEARCH_VERSION: elasticsearch:8.18.0-SNAPSHOT ELASTICSEARCH_URL: http://localhost:9200 runs-on: ubuntu-latest steps: @@ -43,7 +43,7 @@ jobs: test-platinum: name: Platinum env: - ELASTICSEARCH_VERSION: elasticsearch:8.16.0-SNAPSHOT + ELASTICSEARCH_VERSION: elasticsearch:8.18.0-SNAPSHOT ELASTICSEARCH_URL: https://elastic:elastic@localhost:9200 runs-on: ubuntu-latest steps: diff --git a/CHANGELOG.md b/CHANGELOG.md index 649e058a57..f71b32558b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# 8.17.1 + +* Update elastictransport to 8.6.1 + +Thanks to @AkisAya and @jmfrees for their contributions! 
+ # 8.17.0 * Expose BulkIndexer total flushed bytes metric [#914](https://github.com/elastic/go-elasticsearch/pull/914) thanks to @aureleoules diff --git a/Makefile b/Makefile index 283682fbcb..2b6627fd12 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ SHELL := /bin/bash -ELASTICSEARCH_DEFAULT_BUILD_VERSION = "8.16.0-SNAPSHOT" +ELASTICSEARCH_DEFAULT_BUILD_VERSION = "8.18.0-SNAPSHOT" ##@ Test test-unit: ## Run unit tests diff --git a/_benchmarks/benchmarks/go.mod b/_benchmarks/benchmarks/go.mod index a4ad43ef05..71d92d788a 100644 --- a/_benchmarks/benchmarks/go.mod +++ b/_benchmarks/benchmarks/go.mod @@ -1,14 +1,17 @@ module github.com/elastic/go-elasticsearch/v8/benchmarks -go 1.14 +go 1.21 + +toolchain go1.24.1 replace github.com/elastic/go-elasticsearch/v8 => ../../ require ( - github.com/elastic/elastic-transport-go/v8 v8.0.0-20211216131617-bbee439d559c + github.com/elastic/elastic-transport-go/v8 v8.6.1 github.com/elastic/go-elasticsearch/v8 v8.0.0-20200408073057-6f36a473b19f github.com/fatih/color v1.7.0 - github.com/mattn/go-colorable v0.1.6 // indirect github.com/montanaflynn/stats v0.6.3 github.com/tidwall/gjson v1.9.3 ) + +require github.com/mattn/go-colorable v0.1.6 // indirect diff --git a/_examples/bulk/benchmarks/go.mod b/_examples/bulk/benchmarks/go.mod index cc652cf6c6..27a6a28808 100644 --- a/_examples/bulk/benchmarks/go.mod +++ b/_examples/bulk/benchmarks/go.mod @@ -1,7 +1,8 @@ module github.com/elastic/go-elasticsearch/v8/_examples/bulk/benchmarks -go 1.21 -toolchain go1.21.0 +go 1.22 + +toolchain go1.22.0 replace github.com/elastic/go-elasticsearch/v8 => ../../.. 
@@ -13,7 +14,7 @@ require ( ) require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect go.opentelemetry.io/otel v1.28.0 // indirect diff --git a/_examples/bulk/benchmarks/go.sum b/_examples/bulk/benchmarks/go.sum index 4d8b417d40..8b01b6831d 100644 --- a/_examples/bulk/benchmarks/go.sum +++ b/_examples/bulk/benchmarks/go.sum @@ -2,8 +2,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= -github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1 h1:h2jQRqH6eLGiBSN4eZbQnJLtL4bC5b4lfVFRjw2R4e4= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/_examples/bulk/go.mod b/_examples/bulk/go.mod index c3237b2b8d..e3d1ee2377 100644 --- a/_examples/bulk/go.mod +++ b/_examples/bulk/go.mod @@ -12,7 +12,7 @@ require ( ) require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect go.opentelemetry.io/otel v1.28.0 // indirect diff --git 
a/_examples/bulk/go.sum b/_examples/bulk/go.sum index 7ede759434..79edc2508e 100644 --- a/_examples/bulk/go.sum +++ b/_examples/bulk/go.sum @@ -4,8 +4,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= -github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1 h1:h2jQRqH6eLGiBSN4eZbQnJLtL4bC5b4lfVFRjw2R4e4= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/_examples/bulk/kafka/go.mod b/_examples/bulk/kafka/go.mod index eff6c665a5..01238940bc 100644 --- a/_examples/bulk/kafka/go.mod +++ b/_examples/bulk/kafka/go.mod @@ -1,6 +1,7 @@ module github.com/elastic/go-elasticsearch/v8/_examples/bulk/kafka go 1.21 + toolchain go1.21.0 replace github.com/elastic/go-elasticsearch/v8 => ../../.. 
@@ -13,7 +14,7 @@ require ( require ( github.com/armon/go-radix v1.0.0 // indirect - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/elastic/go-licenser v0.3.1 // indirect github.com/elastic/go-sysinfo v1.1.1 // indirect github.com/elastic/go-windows v1.0.0 // indirect diff --git a/_examples/bulk/kafka/go.sum b/_examples/bulk/kafka/go.sum index f1be7e8b0d..0b5f141175 100644 --- a/_examples/bulk/kafka/go.sum +++ b/_examples/bulk/kafka/go.sum @@ -7,6 +7,7 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/elastic/go-licenser v0.3.1 h1:RmRukU/JUmts+rpexAw0Fvt2ly7VVu6mw8z4HrEzObU= github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= github.com/elastic/go-sysinfo v1.1.1 h1:ZVlaLDyhVkDfjwPGU55CQRCRolNpc7P0BbyhhQZQmMI= diff --git a/_examples/cloudfunction/go.mod b/_examples/cloudfunction/go.mod index 1f79461a30..d5bb03172a 100644 --- a/_examples/cloudfunction/go.mod +++ b/_examples/cloudfunction/go.mod @@ -1,6 +1,7 @@ module github.com/elastic/go-elasticsearch/v8/_examples/clusterstatus go 1.21 + toolchain go1.21.0 replace github.com/elastic/go-elasticsearch/v8 => ../.. @@ -8,7 +9,7 @@ replace github.com/elastic/go-elasticsearch/v8 => ../.. 
require github.com/elastic/go-elasticsearch/v8 v8.0.0-20210817150010-57d659deaca7 require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect go.opentelemetry.io/otel v1.28.0 // indirect diff --git a/_examples/cloudfunction/go.sum b/_examples/cloudfunction/go.sum index 55fd35b5d6..2a6603fec7 100644 --- a/_examples/cloudfunction/go.sum +++ b/_examples/cloudfunction/go.sum @@ -1,7 +1,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= -github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1 h1:h2jQRqH6eLGiBSN4eZbQnJLtL4bC5b4lfVFRjw2R4e4= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/_examples/encoding/go.mod b/_examples/encoding/go.mod index 2362ce44c9..5bf34e326e 100644 --- a/_examples/encoding/go.mod +++ b/_examples/encoding/go.mod @@ -1,6 +1,7 @@ module github.com/elastic/go-elasticsearch/v8/_examples/encoding go 1.21 + toolchain go1.21.0 replace github.com/elastic/go-elasticsearch/v8 => ../.. 
@@ -13,7 +14,7 @@ require ( ) require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/josharian/intern v1.0.0 // indirect diff --git a/_examples/encoding/go.sum b/_examples/encoding/go.sum index e374e36611..3fa207cabe 100644 --- a/_examples/encoding/go.sum +++ b/_examples/encoding/go.sum @@ -1,7 +1,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= -github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1 h1:h2jQRqH6eLGiBSN4eZbQnJLtL4bC5b4lfVFRjw2R4e4= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= diff --git a/_examples/extension/go.mod b/_examples/extension/go.mod index eeb981c272..558b029ab1 100644 --- a/_examples/extension/go.mod +++ b/_examples/extension/go.mod @@ -1,12 +1,13 @@ module github.com/elastic/go-elasticsearch/v8/_examples/extension go 1.21 + toolchain go1.21.0 replace github.com/elastic/go-elasticsearch/v8 => ../.. 
require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 + github.com/elastic/elastic-transport-go/v8 v8.6.1 github.com/elastic/go-elasticsearch/v8 v8.13.1 ) diff --git a/_examples/extension/go.sum b/_examples/extension/go.sum index 5874f0b317..fddb85edcb 100644 --- a/_examples/extension/go.sum +++ b/_examples/extension/go.sum @@ -1,7 +1,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= -github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1 h1:h2jQRqH6eLGiBSN4eZbQnJLtL4bC5b4lfVFRjw2R4e4= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/_examples/fasthttp/go.mod b/_examples/fasthttp/go.mod index 23347ca0c1..638edb1662 100644 --- a/_examples/fasthttp/go.mod +++ b/_examples/fasthttp/go.mod @@ -1,6 +1,7 @@ module github.com/elastic/go-elasticsearch/v8/_examples/fasthttp go 1.21 + toolchain go1.21.0 replace github.com/elastic/go-elasticsearch/v8 => ../.. 
@@ -12,7 +13,7 @@ require ( require ( github.com/andybalholm/brotli v1.0.4 // indirect - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/klauspost/compress v1.15.0 // indirect diff --git a/_examples/fasthttp/go.sum b/_examples/fasthttp/go.sum index 02a4d0c884..6e69bc1c4a 100644 --- a/_examples/fasthttp/go.sum +++ b/_examples/fasthttp/go.sum @@ -2,8 +2,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= -github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1 h1:h2jQRqH6eLGiBSN4eZbQnJLtL4bC5b4lfVFRjw2R4e4= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/_examples/go.mod b/_examples/go.mod index 3ab9752de4..ad4b90e3ec 100644 --- a/_examples/go.mod +++ b/_examples/go.mod @@ -1,14 +1,15 @@ module _examples -go 1.21 -toolchain go1.21.0 +go 1.22 + +toolchain go1.22.0 replace github.com/elastic/go-elasticsearch/v8 => ../ require github.com/elastic/go-elasticsearch/v8 v8.0.0-00010101000000-000000000000 require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + 
github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect go.opentelemetry.io/otel v1.28.0 // indirect diff --git a/_examples/go.sum b/_examples/go.sum index 5874f0b317..fddb85edcb 100644 --- a/_examples/go.sum +++ b/_examples/go.sum @@ -1,7 +1,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= -github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1 h1:h2jQRqH6eLGiBSN4eZbQnJLtL4bC5b4lfVFRjw2R4e4= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/_examples/instrumentation/go.mod b/_examples/instrumentation/go.mod index 5d09edfd40..de166b7731 100644 --- a/_examples/instrumentation/go.mod +++ b/_examples/instrumentation/go.mod @@ -1,12 +1,13 @@ module github.com/elastic/go-elasticsearch/v8/_examples/instrumentation/opencensus go 1.21 + toolchain go1.21.0 replace github.com/elastic/go-elasticsearch/v8 => ../.. 
require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 + github.com/elastic/elastic-transport-go/v8 v8.6.1 github.com/elastic/go-elasticsearch/v8 v8.0.0-20191002063538-b491ce54d752 github.com/fatih/color v1.7.0 go.elastic.co/apm v1.11.0 diff --git a/_examples/instrumentation/go.sum b/_examples/instrumentation/go.sum index 51a6ffbd09..aa58c6ba9c 100644 --- a/_examples/instrumentation/go.sum +++ b/_examples/instrumentation/go.sum @@ -14,8 +14,8 @@ github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAEl github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= -github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1 h1:h2jQRqH6eLGiBSN4eZbQnJLtL4bC5b4lfVFRjw2R4e4= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/elastic/go-sysinfo v1.0.1/go.mod h1:O/D5m1VpYLwGjCYzEt63g3Z1uO3jXfwyzzjiW90t8cY= github.com/elastic/go-sysinfo v1.1.1 h1:ZVlaLDyhVkDfjwPGU55CQRCRolNpc7P0BbyhhQZQmMI= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= diff --git a/_examples/logging/go.mod b/_examples/logging/go.mod index 31fa43e788..fc16f83029 100644 --- a/_examples/logging/go.mod +++ b/_examples/logging/go.mod @@ -1,12 +1,13 @@ module github.com/elastic/go-elasticsearch/v8/_examples/logging go 1.21 + toolchain go1.21.0 replace github.com/elastic/go-elasticsearch/v8 => ../.. 
require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 + github.com/elastic/elastic-transport-go/v8 v8.6.1 github.com/elastic/go-elasticsearch/v8 v8.0.0-00010101000000-000000000000 github.com/rs/zerolog v1.32.0 ) diff --git a/_examples/logging/go.sum b/_examples/logging/go.sum index 6408ed138b..15366ad77a 100644 --- a/_examples/logging/go.sum +++ b/_examples/logging/go.sum @@ -2,8 +2,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= -github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1 h1:h2jQRqH6eLGiBSN4eZbQnJLtL4bC5b4lfVFRjw2R4e4= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/_examples/security/go.mod b/_examples/security/go.mod index a7a40bac6b..3f94e5bd22 100644 --- a/_examples/security/go.mod +++ b/_examples/security/go.mod @@ -1,6 +1,7 @@ module github.com/elastic/go-elasticsearch/v8/_examples/security go 1.21 + toolchain go1.21.0 replace github.com/elastic/go-elasticsearch/v8 => ../.. @@ -8,7 +9,7 @@ replace github.com/elastic/go-elasticsearch/v8 => ../.. 
require github.com/elastic/go-elasticsearch/v8 v8.0.0-00010101000000-000000000000 require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect go.opentelemetry.io/otel v1.28.0 // indirect diff --git a/_examples/security/go.sum b/_examples/security/go.sum index 55fd35b5d6..2a6603fec7 100644 --- a/_examples/security/go.sum +++ b/_examples/security/go.sum @@ -1,7 +1,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= -github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1 h1:h2jQRqH6eLGiBSN4eZbQnJLtL4bC5b4lfVFRjw2R4e4= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/_examples/xkcdsearch/go.mod b/_examples/xkcdsearch/go.mod index 117289247e..bf6dd20526 100644 --- a/_examples/xkcdsearch/go.mod +++ b/_examples/xkcdsearch/go.mod @@ -1,6 +1,7 @@ module github.com/elastic/go-elasticsearch/v8/_examples/xkcdsearch go 1.21 + toolchain go1.21.0 replace github.com/elastic/go-elasticsearch/v8 => ../.. 
@@ -13,7 +14,7 @@ require ( ) require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect diff --git a/_examples/xkcdsearch/go.sum b/_examples/xkcdsearch/go.sum index 8e0670992e..da6a5d0690 100644 --- a/_examples/xkcdsearch/go.sum +++ b/_examples/xkcdsearch/go.sum @@ -1,7 +1,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= -github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1 h1:h2jQRqH6eLGiBSN4eZbQnJLtL4bC5b4lfVFRjw2R4e4= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/esapi/api._.go b/esapi/api._.go index a5bafa1819..c961f6f32c 100755 --- a/esapi/api._.go +++ b/esapi/api._.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0 (cda8773): DO NOT EDIT +// Code generated from specification version 8.18.0 (4ac5dd1): DO NOT EDIT package esapi @@ -103,8 +103,10 @@ type API struct { EqlGet EqlGet EqlGetStatus EqlGetStatus EqlSearch EqlSearch + EsqlAsyncQueryDelete EsqlAsyncQueryDelete EsqlAsyncQueryGet EsqlAsyncQueryGet EsqlAsyncQuery EsqlAsyncQuery + EsqlAsyncQueryStop EsqlAsyncQueryStop EsqlQuery EsqlQuery Exists Exists ExistsSource ExistsSource @@ -126,10 +128,17 @@ type API struct { GraphExplore GraphExplore HealthReport HealthReport Index Index + InferenceChatCompletionUnified InferenceChatCompletionUnified + InferenceCompletion InferenceCompletion InferenceDelete InferenceDelete InferenceGet InferenceGet InferenceInference InferenceInference InferencePut InferencePut + InferenceRerank InferenceRerank + InferenceSparseEmbedding InferenceSparseEmbedding + InferenceStreamCompletion InferenceStreamCompletion + InferenceTextEmbedding InferenceTextEmbedding + InferenceUpdate InferenceUpdate Info Info KnnSearch KnnSearch LogstashDeletePipeline LogstashDeletePipeline @@ -153,6 +162,7 @@ type API struct { QueryRulesListRulesets QueryRulesListRulesets QueryRulesPutRule QueryRulesPutRule QueryRulesPutRuleset QueryRulesPutRuleset + QueryRulesTest QueryRulesTest RankEval RankEval Reindex Reindex ReindexRethrottle ReindexRethrottle @@ -272,78 +282,86 @@ type Cluster struct { // Indices contains the Indices APIs type Indices struct { - AddBlock IndicesAddBlock - Analyze IndicesAnalyze - ClearCache IndicesClearCache - Clone IndicesClone - Close IndicesClose - CreateDataStream IndicesCreateDataStream - Create IndicesCreate - DataStreamsStats IndicesDataStreamsStats - DeleteAlias IndicesDeleteAlias - DeleteDataLifecycle IndicesDeleteDataLifecycle - DeleteDataStream IndicesDeleteDataStream - DeleteIndexTemplate IndicesDeleteIndexTemplate - Delete IndicesDelete - DeleteTemplate IndicesDeleteTemplate - DiskUsage IndicesDiskUsage - Downsample 
IndicesDownsample - ExistsAlias IndicesExistsAlias - ExistsIndexTemplate IndicesExistsIndexTemplate - Exists IndicesExists - ExistsTemplate IndicesExistsTemplate - ExplainDataLifecycle IndicesExplainDataLifecycle - FieldUsageStats IndicesFieldUsageStats - Flush IndicesFlush - Forcemerge IndicesForcemerge - GetAlias IndicesGetAlias - GetDataLifecycle IndicesGetDataLifecycle - GetDataStream IndicesGetDataStream - GetFieldMapping IndicesGetFieldMapping - GetIndexTemplate IndicesGetIndexTemplate - GetMapping IndicesGetMapping - Get IndicesGet - GetSettings IndicesGetSettings - GetTemplate IndicesGetTemplate - MigrateToDataStream IndicesMigrateToDataStream - ModifyDataStream IndicesModifyDataStream - Open IndicesOpen - PromoteDataStream IndicesPromoteDataStream - PutAlias IndicesPutAlias - PutDataLifecycle IndicesPutDataLifecycle - PutIndexTemplate IndicesPutIndexTemplate - PutMapping IndicesPutMapping - PutSettings IndicesPutSettings - PutTemplate IndicesPutTemplate - Recovery IndicesRecovery - Refresh IndicesRefresh - ReloadSearchAnalyzers IndicesReloadSearchAnalyzers - ResolveCluster IndicesResolveCluster - ResolveIndex IndicesResolveIndex - Rollover IndicesRollover - Segments IndicesSegments - ShardStores IndicesShardStores - Shrink IndicesShrink - SimulateIndexTemplate IndicesSimulateIndexTemplate - SimulateTemplate IndicesSimulateTemplate - Split IndicesSplit - Stats IndicesStats - Unfreeze IndicesUnfreeze - UpdateAliases IndicesUpdateAliases - ValidateQuery IndicesValidateQuery + AddBlock IndicesAddBlock + Analyze IndicesAnalyze + CancelMigrateReindex IndicesCancelMigrateReindex + ClearCache IndicesClearCache + Clone IndicesClone + Close IndicesClose + CreateDataStream IndicesCreateDataStream + CreateFrom IndicesCreateFrom + Create IndicesCreate + DataStreamsStats IndicesDataStreamsStats + DeleteAlias IndicesDeleteAlias + DeleteDataLifecycle IndicesDeleteDataLifecycle + DeleteDataStream IndicesDeleteDataStream + DeleteIndexTemplate IndicesDeleteIndexTemplate + 
Delete IndicesDelete + DeleteTemplate IndicesDeleteTemplate + DiskUsage IndicesDiskUsage + Downsample IndicesDownsample + ExistsAlias IndicesExistsAlias + ExistsIndexTemplate IndicesExistsIndexTemplate + Exists IndicesExists + ExistsTemplate IndicesExistsTemplate + ExplainDataLifecycle IndicesExplainDataLifecycle + FieldUsageStats IndicesFieldUsageStats + Flush IndicesFlush + Forcemerge IndicesForcemerge + GetAlias IndicesGetAlias + GetDataLifecycle IndicesGetDataLifecycle + GetDataLifecycleStats IndicesGetDataLifecycleStats + GetDataStream IndicesGetDataStream + GetFieldMapping IndicesGetFieldMapping + GetIndexTemplate IndicesGetIndexTemplate + GetMapping IndicesGetMapping + GetMigrateReindexStatus IndicesGetMigrateReindexStatus + Get IndicesGet + GetSettings IndicesGetSettings + GetTemplate IndicesGetTemplate + MigrateReindex IndicesMigrateReindex + MigrateToDataStream IndicesMigrateToDataStream + ModifyDataStream IndicesModifyDataStream + Open IndicesOpen + PromoteDataStream IndicesPromoteDataStream + PutAlias IndicesPutAlias + PutDataLifecycle IndicesPutDataLifecycle + PutIndexTemplate IndicesPutIndexTemplate + PutMapping IndicesPutMapping + PutSettings IndicesPutSettings + PutTemplate IndicesPutTemplate + Recovery IndicesRecovery + Refresh IndicesRefresh + ReloadSearchAnalyzers IndicesReloadSearchAnalyzers + ResolveCluster IndicesResolveCluster + ResolveIndex IndicesResolveIndex + Rollover IndicesRollover + Segments IndicesSegments + ShardStores IndicesShardStores + Shrink IndicesShrink + SimulateIndexTemplate IndicesSimulateIndexTemplate + SimulateTemplate IndicesSimulateTemplate + Split IndicesSplit + Stats IndicesStats + Unfreeze IndicesUnfreeze + UpdateAliases IndicesUpdateAliases + ValidateQuery IndicesValidateQuery } // Ingest contains the Ingest APIs type Ingest struct { - DeleteGeoipDatabase IngestDeleteGeoipDatabase - DeletePipeline IngestDeletePipeline - GeoIPStats IngestGeoIPStats - GetGeoipDatabase IngestGetGeoipDatabase - GetPipeline 
IngestGetPipeline - ProcessorGrok IngestProcessorGrok - PutGeoipDatabase IngestPutGeoipDatabase - PutPipeline IngestPutPipeline - Simulate IngestSimulate + DeleteGeoipDatabase IngestDeleteGeoipDatabase + DeleteIPLocationDatabase IngestDeleteIPLocationDatabase + DeletePipeline IngestDeletePipeline + GeoIPStats IngestGeoIPStats + GetGeoipDatabase IngestGetGeoipDatabase + GetIPLocationDatabase IngestGetIPLocationDatabase + GetPipeline IngestGetPipeline + ProcessorGrok IngestProcessorGrok + PutGeoipDatabase IngestPutGeoipDatabase + PutIPLocationDatabase IngestPutIPLocationDatabase + PutPipeline IngestPutPipeline + Simulate IngestSimulate } // Nodes contains the Nodes APIs @@ -363,18 +381,19 @@ type Remote struct { // Snapshot contains the Snapshot APIs type Snapshot struct { - CleanupRepository SnapshotCleanupRepository - Clone SnapshotClone - CreateRepository SnapshotCreateRepository - Create SnapshotCreate - DeleteRepository SnapshotDeleteRepository - Delete SnapshotDelete - GetRepository SnapshotGetRepository - Get SnapshotGet - RepositoryAnalyze SnapshotRepositoryAnalyze - Restore SnapshotRestore - Status SnapshotStatus - VerifyRepository SnapshotVerifyRepository + CleanupRepository SnapshotCleanupRepository + Clone SnapshotClone + CreateRepository SnapshotCreateRepository + Create SnapshotCreate + DeleteRepository SnapshotDeleteRepository + Delete SnapshotDelete + GetRepository SnapshotGetRepository + Get SnapshotGet + RepositoryAnalyze SnapshotRepositoryAnalyze + RepositoryVerifyIntegrity SnapshotRepositoryVerifyIntegrity + Restore SnapshotRestore + Status SnapshotStatus + VerifyRepository SnapshotVerifyRepository } // Tasks contains the Tasks APIs @@ -552,6 +571,7 @@ type Security struct { CreateAPIKey SecurityCreateAPIKey CreateCrossClusterAPIKey SecurityCreateCrossClusterAPIKey CreateServiceToken SecurityCreateServiceToken + DelegatePki SecurityDelegatePki DeletePrivileges SecurityDeletePrivileges DeleteRoleMapping SecurityDeleteRoleMapping DeleteRole 
SecurityDeleteRole @@ -704,8 +724,10 @@ func New(t Transport) *API { EqlGet: newEqlGetFunc(t), EqlGetStatus: newEqlGetStatusFunc(t), EqlSearch: newEqlSearchFunc(t), + EsqlAsyncQueryDelete: newEsqlAsyncQueryDeleteFunc(t), EsqlAsyncQueryGet: newEsqlAsyncQueryGetFunc(t), EsqlAsyncQuery: newEsqlAsyncQueryFunc(t), + EsqlAsyncQueryStop: newEsqlAsyncQueryStopFunc(t), EsqlQuery: newEsqlQueryFunc(t), Exists: newExistsFunc(t), ExistsSource: newExistsSourceFunc(t), @@ -727,10 +749,17 @@ func New(t Transport) *API { GraphExplore: newGraphExploreFunc(t), HealthReport: newHealthReportFunc(t), Index: newIndexFunc(t), + InferenceChatCompletionUnified: newInferenceChatCompletionUnifiedFunc(t), + InferenceCompletion: newInferenceCompletionFunc(t), InferenceDelete: newInferenceDeleteFunc(t), InferenceGet: newInferenceGetFunc(t), InferenceInference: newInferenceInferenceFunc(t), InferencePut: newInferencePutFunc(t), + InferenceRerank: newInferenceRerankFunc(t), + InferenceSparseEmbedding: newInferenceSparseEmbeddingFunc(t), + InferenceStreamCompletion: newInferenceStreamCompletionFunc(t), + InferenceTextEmbedding: newInferenceTextEmbeddingFunc(t), + InferenceUpdate: newInferenceUpdateFunc(t), Info: newInfoFunc(t), KnnSearch: newKnnSearchFunc(t), LogstashDeletePipeline: newLogstashDeletePipelineFunc(t), @@ -754,6 +783,7 @@ func New(t Transport) *API { QueryRulesListRulesets: newQueryRulesListRulesetsFunc(t), QueryRulesPutRule: newQueryRulesPutRuleFunc(t), QueryRulesPutRuleset: newQueryRulesPutRulesetFunc(t), + QueryRulesTest: newQueryRulesTestFunc(t), RankEval: newRankEvalFunc(t), Reindex: newReindexFunc(t), ReindexRethrottle: newReindexRethrottleFunc(t), @@ -866,76 +896,84 @@ func New(t Transport) *API { Stats: newClusterStatsFunc(t), }, Indices: &Indices{ - AddBlock: newIndicesAddBlockFunc(t), - Analyze: newIndicesAnalyzeFunc(t), - ClearCache: newIndicesClearCacheFunc(t), - Clone: newIndicesCloneFunc(t), - Close: newIndicesCloseFunc(t), - CreateDataStream: 
newIndicesCreateDataStreamFunc(t), - Create: newIndicesCreateFunc(t), - DataStreamsStats: newIndicesDataStreamsStatsFunc(t), - DeleteAlias: newIndicesDeleteAliasFunc(t), - DeleteDataLifecycle: newIndicesDeleteDataLifecycleFunc(t), - DeleteDataStream: newIndicesDeleteDataStreamFunc(t), - DeleteIndexTemplate: newIndicesDeleteIndexTemplateFunc(t), - Delete: newIndicesDeleteFunc(t), - DeleteTemplate: newIndicesDeleteTemplateFunc(t), - DiskUsage: newIndicesDiskUsageFunc(t), - Downsample: newIndicesDownsampleFunc(t), - ExistsAlias: newIndicesExistsAliasFunc(t), - ExistsIndexTemplate: newIndicesExistsIndexTemplateFunc(t), - Exists: newIndicesExistsFunc(t), - ExistsTemplate: newIndicesExistsTemplateFunc(t), - ExplainDataLifecycle: newIndicesExplainDataLifecycleFunc(t), - FieldUsageStats: newIndicesFieldUsageStatsFunc(t), - Flush: newIndicesFlushFunc(t), - Forcemerge: newIndicesForcemergeFunc(t), - GetAlias: newIndicesGetAliasFunc(t), - GetDataLifecycle: newIndicesGetDataLifecycleFunc(t), - GetDataStream: newIndicesGetDataStreamFunc(t), - GetFieldMapping: newIndicesGetFieldMappingFunc(t), - GetIndexTemplate: newIndicesGetIndexTemplateFunc(t), - GetMapping: newIndicesGetMappingFunc(t), - Get: newIndicesGetFunc(t), - GetSettings: newIndicesGetSettingsFunc(t), - GetTemplate: newIndicesGetTemplateFunc(t), - MigrateToDataStream: newIndicesMigrateToDataStreamFunc(t), - ModifyDataStream: newIndicesModifyDataStreamFunc(t), - Open: newIndicesOpenFunc(t), - PromoteDataStream: newIndicesPromoteDataStreamFunc(t), - PutAlias: newIndicesPutAliasFunc(t), - PutDataLifecycle: newIndicesPutDataLifecycleFunc(t), - PutIndexTemplate: newIndicesPutIndexTemplateFunc(t), - PutMapping: newIndicesPutMappingFunc(t), - PutSettings: newIndicesPutSettingsFunc(t), - PutTemplate: newIndicesPutTemplateFunc(t), - Recovery: newIndicesRecoveryFunc(t), - Refresh: newIndicesRefreshFunc(t), - ReloadSearchAnalyzers: newIndicesReloadSearchAnalyzersFunc(t), - ResolveCluster: newIndicesResolveClusterFunc(t), - 
ResolveIndex: newIndicesResolveIndexFunc(t), - Rollover: newIndicesRolloverFunc(t), - Segments: newIndicesSegmentsFunc(t), - ShardStores: newIndicesShardStoresFunc(t), - Shrink: newIndicesShrinkFunc(t), - SimulateIndexTemplate: newIndicesSimulateIndexTemplateFunc(t), - SimulateTemplate: newIndicesSimulateTemplateFunc(t), - Split: newIndicesSplitFunc(t), - Stats: newIndicesStatsFunc(t), - Unfreeze: newIndicesUnfreezeFunc(t), - UpdateAliases: newIndicesUpdateAliasesFunc(t), - ValidateQuery: newIndicesValidateQueryFunc(t), + AddBlock: newIndicesAddBlockFunc(t), + Analyze: newIndicesAnalyzeFunc(t), + CancelMigrateReindex: newIndicesCancelMigrateReindexFunc(t), + ClearCache: newIndicesClearCacheFunc(t), + Clone: newIndicesCloneFunc(t), + Close: newIndicesCloseFunc(t), + CreateDataStream: newIndicesCreateDataStreamFunc(t), + CreateFrom: newIndicesCreateFromFunc(t), + Create: newIndicesCreateFunc(t), + DataStreamsStats: newIndicesDataStreamsStatsFunc(t), + DeleteAlias: newIndicesDeleteAliasFunc(t), + DeleteDataLifecycle: newIndicesDeleteDataLifecycleFunc(t), + DeleteDataStream: newIndicesDeleteDataStreamFunc(t), + DeleteIndexTemplate: newIndicesDeleteIndexTemplateFunc(t), + Delete: newIndicesDeleteFunc(t), + DeleteTemplate: newIndicesDeleteTemplateFunc(t), + DiskUsage: newIndicesDiskUsageFunc(t), + Downsample: newIndicesDownsampleFunc(t), + ExistsAlias: newIndicesExistsAliasFunc(t), + ExistsIndexTemplate: newIndicesExistsIndexTemplateFunc(t), + Exists: newIndicesExistsFunc(t), + ExistsTemplate: newIndicesExistsTemplateFunc(t), + ExplainDataLifecycle: newIndicesExplainDataLifecycleFunc(t), + FieldUsageStats: newIndicesFieldUsageStatsFunc(t), + Flush: newIndicesFlushFunc(t), + Forcemerge: newIndicesForcemergeFunc(t), + GetAlias: newIndicesGetAliasFunc(t), + GetDataLifecycle: newIndicesGetDataLifecycleFunc(t), + GetDataLifecycleStats: newIndicesGetDataLifecycleStatsFunc(t), + GetDataStream: newIndicesGetDataStreamFunc(t), + GetFieldMapping: newIndicesGetFieldMappingFunc(t), 
+ GetIndexTemplate: newIndicesGetIndexTemplateFunc(t), + GetMapping: newIndicesGetMappingFunc(t), + GetMigrateReindexStatus: newIndicesGetMigrateReindexStatusFunc(t), + Get: newIndicesGetFunc(t), + GetSettings: newIndicesGetSettingsFunc(t), + GetTemplate: newIndicesGetTemplateFunc(t), + MigrateReindex: newIndicesMigrateReindexFunc(t), + MigrateToDataStream: newIndicesMigrateToDataStreamFunc(t), + ModifyDataStream: newIndicesModifyDataStreamFunc(t), + Open: newIndicesOpenFunc(t), + PromoteDataStream: newIndicesPromoteDataStreamFunc(t), + PutAlias: newIndicesPutAliasFunc(t), + PutDataLifecycle: newIndicesPutDataLifecycleFunc(t), + PutIndexTemplate: newIndicesPutIndexTemplateFunc(t), + PutMapping: newIndicesPutMappingFunc(t), + PutSettings: newIndicesPutSettingsFunc(t), + PutTemplate: newIndicesPutTemplateFunc(t), + Recovery: newIndicesRecoveryFunc(t), + Refresh: newIndicesRefreshFunc(t), + ReloadSearchAnalyzers: newIndicesReloadSearchAnalyzersFunc(t), + ResolveCluster: newIndicesResolveClusterFunc(t), + ResolveIndex: newIndicesResolveIndexFunc(t), + Rollover: newIndicesRolloverFunc(t), + Segments: newIndicesSegmentsFunc(t), + ShardStores: newIndicesShardStoresFunc(t), + Shrink: newIndicesShrinkFunc(t), + SimulateIndexTemplate: newIndicesSimulateIndexTemplateFunc(t), + SimulateTemplate: newIndicesSimulateTemplateFunc(t), + Split: newIndicesSplitFunc(t), + Stats: newIndicesStatsFunc(t), + Unfreeze: newIndicesUnfreezeFunc(t), + UpdateAliases: newIndicesUpdateAliasesFunc(t), + ValidateQuery: newIndicesValidateQueryFunc(t), }, Ingest: &Ingest{ - DeleteGeoipDatabase: newIngestDeleteGeoipDatabaseFunc(t), - DeletePipeline: newIngestDeletePipelineFunc(t), - GeoIPStats: newIngestGeoIPStatsFunc(t), - GetGeoipDatabase: newIngestGetGeoipDatabaseFunc(t), - GetPipeline: newIngestGetPipelineFunc(t), - ProcessorGrok: newIngestProcessorGrokFunc(t), - PutGeoipDatabase: newIngestPutGeoipDatabaseFunc(t), - PutPipeline: newIngestPutPipelineFunc(t), - Simulate: newIngestSimulateFunc(t), + 
DeleteGeoipDatabase: newIngestDeleteGeoipDatabaseFunc(t), + DeleteIPLocationDatabase: newIngestDeleteIPLocationDatabaseFunc(t), + DeletePipeline: newIngestDeletePipelineFunc(t), + GeoIPStats: newIngestGeoIPStatsFunc(t), + GetGeoipDatabase: newIngestGetGeoipDatabaseFunc(t), + GetIPLocationDatabase: newIngestGetIPLocationDatabaseFunc(t), + GetPipeline: newIngestGetPipelineFunc(t), + ProcessorGrok: newIngestProcessorGrokFunc(t), + PutGeoipDatabase: newIngestPutGeoipDatabaseFunc(t), + PutIPLocationDatabase: newIngestPutIPLocationDatabaseFunc(t), + PutPipeline: newIngestPutPipelineFunc(t), + Simulate: newIngestSimulateFunc(t), }, Nodes: &Nodes{ ClearRepositoriesMeteringArchive: newNodesClearRepositoriesMeteringArchiveFunc(t), @@ -948,18 +986,19 @@ func New(t Transport) *API { }, Remote: &Remote{}, Snapshot: &Snapshot{ - CleanupRepository: newSnapshotCleanupRepositoryFunc(t), - Clone: newSnapshotCloneFunc(t), - CreateRepository: newSnapshotCreateRepositoryFunc(t), - Create: newSnapshotCreateFunc(t), - DeleteRepository: newSnapshotDeleteRepositoryFunc(t), - Delete: newSnapshotDeleteFunc(t), - GetRepository: newSnapshotGetRepositoryFunc(t), - Get: newSnapshotGetFunc(t), - RepositoryAnalyze: newSnapshotRepositoryAnalyzeFunc(t), - Restore: newSnapshotRestoreFunc(t), - Status: newSnapshotStatusFunc(t), - VerifyRepository: newSnapshotVerifyRepositoryFunc(t), + CleanupRepository: newSnapshotCleanupRepositoryFunc(t), + Clone: newSnapshotCloneFunc(t), + CreateRepository: newSnapshotCreateRepositoryFunc(t), + Create: newSnapshotCreateFunc(t), + DeleteRepository: newSnapshotDeleteRepositoryFunc(t), + Delete: newSnapshotDeleteFunc(t), + GetRepository: newSnapshotGetRepositoryFunc(t), + Get: newSnapshotGetFunc(t), + RepositoryAnalyze: newSnapshotRepositoryAnalyzeFunc(t), + RepositoryVerifyIntegrity: newSnapshotRepositoryVerifyIntegrityFunc(t), + Restore: newSnapshotRestoreFunc(t), + Status: newSnapshotStatusFunc(t), + VerifyRepository: newSnapshotVerifyRepositoryFunc(t), }, Tasks: 
&Tasks{ Cancel: newTasksCancelFunc(t), @@ -1117,6 +1156,7 @@ func New(t Transport) *API { CreateAPIKey: newSecurityCreateAPIKeyFunc(t), CreateCrossClusterAPIKey: newSecurityCreateCrossClusterAPIKeyFunc(t), CreateServiceToken: newSecurityCreateServiceTokenFunc(t), + DelegatePki: newSecurityDelegatePkiFunc(t), DeletePrivileges: newSecurityDeletePrivilegesFunc(t), DeleteRoleMapping: newSecurityDeleteRoleMappingFunc(t), DeleteRole: newSecurityDeleteRoleFunc(t), diff --git a/esapi/api.bulk.go b/esapi/api.bulk.go index 8389c75ce4..cc25b463be 100644 --- a/esapi/api.bulk.go +++ b/esapi/api.bulk.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -56,6 +56,7 @@ type BulkRequest struct { Body io.Reader + IncludeSourceOnError *bool ListExecutedPipelines *bool Pipeline string Refresh string @@ -114,6 +115,10 @@ func (r BulkRequest) Do(providedCtx context.Context, transport Transport) (*Resp params = make(map[string]string) + if r.IncludeSourceOnError != nil { + params["include_source_on_error"] = strconv.FormatBool(*r.IncludeSourceOnError) + } + if r.ListExecutedPipelines != nil { params["list_executed_pipelines"] = strconv.FormatBool(*r.ListExecutedPipelines) } @@ -254,6 +259,13 @@ func (f Bulk) WithIndex(v string) func(*BulkRequest) { } } +// WithIncludeSourceOnError - true or false if to include the document source in the error message in case of parsing errors. defaults to true.. +func (f Bulk) WithIncludeSourceOnError(v bool) func(*BulkRequest) { + return func(r *BulkRequest) { + r.IncludeSourceOnError = &v + } +} + // WithListExecutedPipelines - sets list_executed_pipelines for all incoming documents. defaults to unset (false). 
func (f Bulk) WithListExecutedPipelines(v bool) func(*BulkRequest) { return func(r *BulkRequest) { diff --git a/esapi/api.capabilities.go b/esapi/api.capabilities.go index 514be3f474..4eab17e2ba 100644 --- a/esapi/api.capabilities.go +++ b/esapi/api.capabilities.go @@ -15,13 +15,14 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi import ( "context" "net/http" + "strconv" "strings" ) @@ -52,6 +53,7 @@ type Capabilities func(o ...func(*CapabilitiesRequest)) (*Response, error) // CapabilitiesRequest configures the Capabilities API request. type CapabilitiesRequest struct { Capabilities string + LocalOnly *bool Method string Parameters string Path string @@ -97,6 +99,10 @@ func (r CapabilitiesRequest) Do(providedCtx context.Context, transport Transport params["capabilities"] = r.Capabilities } + if r.LocalOnly != nil { + params["local_only"] = strconv.FormatBool(*r.LocalOnly) + } + if r.Method != "" { params["method"] = r.Method } @@ -194,6 +200,13 @@ func (f Capabilities) WithCapabilities(v string) func(*CapabilitiesRequest) { } } +// WithLocalOnly - true if only the node being called should be considered. +func (f Capabilities) WithLocalOnly(v bool) func(*CapabilitiesRequest) { + return func(r *CapabilitiesRequest) { + r.LocalOnly = &v + } +} + // WithMethod - rest method to check. func (f Capabilities) WithMethod(v string) func(*CapabilitiesRequest) { return func(r *CapabilitiesRequest) { diff --git a/esapi/api.cat.aliases.go b/esapi/api.cat.aliases.go index 109a6c7825..dffa970019 100644 --- a/esapi/api.cat.aliases.go +++ b/esapi/api.cat.aliases.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.allocation.go b/esapi/api.cat.allocation.go index ceb6fa6287..ed92193643 100644 --- a/esapi/api.cat.allocation.go +++ b/esapi/api.cat.allocation.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.component_templates.go b/esapi/api.cat.component_templates.go index 1a7532f282..0ded5ba2b2 100644 --- a/esapi/api.cat.component_templates.go +++ b/esapi/api.cat.component_templates.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.count.go b/esapi/api.cat.count.go index 227420c3f9..3b6bdeac77 100644 --- a/esapi/api.cat.count.go +++ b/esapi/api.cat.count.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.fielddata.go b/esapi/api.cat.fielddata.go index c62adb1c1f..a2bf411077 100644 --- a/esapi/api.cat.fielddata.go +++ b/esapi/api.cat.fielddata.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.health.go b/esapi/api.cat.health.go index c5e1c5f1bf..5e9e854dbf 100644 --- a/esapi/api.cat.health.go +++ b/esapi/api.cat.health.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.help.go b/esapi/api.cat.help.go index a03a4f52cc..4dd36f277d 100644 --- a/esapi/api.cat.help.go +++ b/esapi/api.cat.help.go @@ -15,14 +15,13 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi import ( "context" "net/http" - "strconv" "strings" ) @@ -50,9 +49,6 @@ type CatHelp func(o ...func(*CatHelpRequest)) (*Response, error) // CatHelpRequest configures the Cat Help API request. type CatHelpRequest struct { - Help *bool - S []string - Pretty bool Human bool ErrorTrace bool @@ -90,14 +86,6 @@ func (r CatHelpRequest) Do(providedCtx context.Context, transport Transport) (*R params = make(map[string]string) - if r.Help != nil { - params["help"] = strconv.FormatBool(*r.Help) - } - - if len(r.S) > 0 { - params["s"] = strings.Join(r.S, ",") - } - if r.Pretty { params["pretty"] = "true" } @@ -176,20 +164,6 @@ func (f CatHelp) WithContext(v context.Context) func(*CatHelpRequest) { } } -// WithHelp - return help information. -func (f CatHelp) WithHelp(v bool) func(*CatHelpRequest) { - return func(r *CatHelpRequest) { - r.Help = &v - } -} - -// WithS - comma-separated list of column names or column aliases to sort by. 
-func (f CatHelp) WithS(v ...string) func(*CatHelpRequest) { - return func(r *CatHelpRequest) { - r.S = v - } -} - // WithPretty makes the response body pretty-printed. func (f CatHelp) WithPretty() func(*CatHelpRequest) { return func(r *CatHelpRequest) { diff --git a/esapi/api.cat.indices.go b/esapi/api.cat.indices.go index f7081d8c5f..b6ffa13afc 100644 --- a/esapi/api.cat.indices.go +++ b/esapi/api.cat.indices.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.master.go b/esapi/api.cat.master.go index a13b9ab237..abf31b2c53 100644 --- a/esapi/api.cat.master.go +++ b/esapi/api.cat.master.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.nodeattrs.go b/esapi/api.cat.nodeattrs.go index f017502db2..73d802a938 100644 --- a/esapi/api.cat.nodeattrs.go +++ b/esapi/api.cat.nodeattrs.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.nodes.go b/esapi/api.cat.nodes.go index 2b7b0be9a5..0526c4c3d2 100644 --- a/esapi/api.cat.nodes.go +++ b/esapi/api.cat.nodes.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.pending_tasks.go b/esapi/api.cat.pending_tasks.go index de66e85cf4..b8167c3d34 100644 --- a/esapi/api.cat.pending_tasks.go +++ b/esapi/api.cat.pending_tasks.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.plugins.go b/esapi/api.cat.plugins.go index 14ac83ca9c..857dfd180a 100644 --- a/esapi/api.cat.plugins.go +++ b/esapi/api.cat.plugins.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.recovery.go b/esapi/api.cat.recovery.go index 1ebc1ffd5f..664345adfe 100644 --- a/esapi/api.cat.recovery.go +++ b/esapi/api.cat.recovery.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.repositories.go b/esapi/api.cat.repositories.go index 7cdeabade6..d2dd2055ff 100644 --- a/esapi/api.cat.repositories.go +++ b/esapi/api.cat.repositories.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.segments.go b/esapi/api.cat.segments.go index 4139ff08ec..13649f3ca0 100644 --- a/esapi/api.cat.segments.go +++ b/esapi/api.cat.segments.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "net/http" "strconv" "strings" + "time" ) func newCatSegmentsFunc(t Transport) CatSegments { @@ -52,12 +53,14 @@ type CatSegments func(o ...func(*CatSegmentsRequest)) (*Response, error) type CatSegmentsRequest struct { Index []string - Bytes string - Format string - H []string - Help *bool - S []string - V *bool + Bytes string + Format string + H []string + Help *bool + Local *bool + MasterTimeout time.Duration + S []string + V *bool Pretty bool Human bool @@ -122,6 +125,14 @@ func (r CatSegmentsRequest) Do(providedCtx context.Context, transport Transport) params["help"] = strconv.FormatBool(*r.Help) } + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + if len(r.S) > 0 { params["s"] = strings.Join(r.S, ",") } @@ -243,6 +254,20 @@ func (f CatSegments) WithHelp(v bool) func(*CatSegmentsRequest) { } } +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f CatSegments) WithLocal(v bool) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. 
+func (f CatSegments) WithMasterTimeout(v time.Duration) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.MasterTimeout = v + } +} + // WithS - comma-separated list of column names or column aliases to sort by. func (f CatSegments) WithS(v ...string) func(*CatSegmentsRequest) { return func(r *CatSegmentsRequest) { diff --git a/esapi/api.cat.shards.go b/esapi/api.cat.shards.go index 403f4b4839..7739d33582 100644 --- a/esapi/api.cat.shards.go +++ b/esapi/api.cat.shards.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.snapshots.go b/esapi/api.cat.snapshots.go index 74d8ca72f4..640c2321c4 100644 --- a/esapi/api.cat.snapshots.go +++ b/esapi/api.cat.snapshots.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.tasks.go b/esapi/api.cat.tasks.go index de5171ff85..188b9ffc98 100644 --- a/esapi/api.cat.tasks.go +++ b/esapi/api.cat.tasks.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "net/http" "strconv" "strings" + "time" ) func newCatTasksFunc(t Transport) CatTasks { @@ -52,16 +53,18 @@ type CatTasks func(o ...func(*CatTasksRequest)) (*Response, error) // CatTasksRequest configures the Cat Tasks API request. 
type CatTasksRequest struct { - Actions []string - Detailed *bool - Format string - H []string - Help *bool - Nodes []string - ParentTaskID string - S []string - Time string - V *bool + Actions []string + Detailed *bool + Format string + H []string + Help *bool + Nodes []string + ParentTaskID string + S []string + Time string + Timeout time.Duration + V *bool + WaitForCompletion *bool Pretty bool Human bool @@ -136,10 +139,18 @@ func (r CatTasksRequest) Do(providedCtx context.Context, transport Transport) (* params["time"] = r.Time } + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.V != nil { params["v"] = strconv.FormatBool(*r.V) } + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + if r.Pretty { params["pretty"] = "true" } @@ -281,6 +292,13 @@ func (f CatTasks) WithTime(v string) func(*CatTasksRequest) { } } +// WithTimeout - period to wait for a response. if no response is received before the timeout expires, the request fails and returns an error.. +func (f CatTasks) WithTimeout(v time.Duration) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.Timeout = v + } +} + // WithV - verbose mode. display column headers. func (f CatTasks) WithV(v bool) func(*CatTasksRequest) { return func(r *CatTasksRequest) { @@ -288,6 +306,13 @@ func (f CatTasks) WithV(v bool) func(*CatTasksRequest) { } } +// WithWaitForCompletion - if `true`, the request blocks until the task has completed.. +func (f CatTasks) WithWaitForCompletion(v bool) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.WaitForCompletion = &v + } +} + // WithPretty makes the response body pretty-printed. 
func (f CatTasks) WithPretty() func(*CatTasksRequest) { return func(r *CatTasksRequest) { diff --git a/esapi/api.cat.templates.go b/esapi/api.cat.templates.go index 2cff2b8746..17fcf39bba 100644 --- a/esapi/api.cat.templates.go +++ b/esapi/api.cat.templates.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cat.thread_pool.go b/esapi/api.cat.thread_pool.go index af889c1869..02704b1716 100644 --- a/esapi/api.cat.thread_pool.go +++ b/esapi/api.cat.thread_pool.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.clear_scroll.go b/esapi/api.clear_scroll.go index 9aa6cc8195..37d79921a2 100644 --- a/esapi/api.clear_scroll.go +++ b/esapi/api.clear_scroll.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.allocation_explain.go b/esapi/api.cluster.allocation_explain.go index 385e0ad03d..df6690c435 100644 --- a/esapi/api.cluster.allocation_explain.go +++ b/esapi/api.cluster.allocation_explain.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.delete_component_template.go b/esapi/api.cluster.delete_component_template.go index 704d056a41..57dd9ebea8 100644 --- a/esapi/api.cluster.delete_component_template.go +++ b/esapi/api.cluster.delete_component_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.delete_voting_config_exclusions.go b/esapi/api.cluster.delete_voting_config_exclusions.go index 497fa212cd..67ca87ebbd 100644 --- a/esapi/api.cluster.delete_voting_config_exclusions.go +++ b/esapi/api.cluster.delete_voting_config_exclusions.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.exists_component_template.go b/esapi/api.cluster.exists_component_template.go index 6802b1b1e7..f428930c9c 100644 --- a/esapi/api.cluster.exists_component_template.go +++ b/esapi/api.cluster.exists_component_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.get_component_template.go b/esapi/api.cluster.get_component_template.go index 7445496f6c..d6b8c7f41e 100644 --- a/esapi/api.cluster.get_component_template.go +++ b/esapi/api.cluster.get_component_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.get_settings.go b/esapi/api.cluster.get_settings.go index 728bdacb3f..a34b922ff3 100644 --- a/esapi/api.cluster.get_settings.go +++ b/esapi/api.cluster.get_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.health.go b/esapi/api.cluster.health.go index 2349ec537d..6b55ffb464 100644 --- a/esapi/api.cluster.health.go +++ b/esapi/api.cluster.health.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.info.go b/esapi/api.cluster.info.go index 2b74fc438e..a3b193a66c 100644 --- a/esapi/api.cluster.info.go +++ b/esapi/api.cluster.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.pending_tasks.go b/esapi/api.cluster.pending_tasks.go index 616059e56d..5c3f9a9157 100644 --- a/esapi/api.cluster.pending_tasks.go +++ b/esapi/api.cluster.pending_tasks.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.post_voting_config_exclusions.go b/esapi/api.cluster.post_voting_config_exclusions.go index e018d275e6..2d8cb54006 100644 --- a/esapi/api.cluster.post_voting_config_exclusions.go +++ b/esapi/api.cluster.post_voting_config_exclusions.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.put_component_template.go b/esapi/api.cluster.put_component_template.go index 977724aca8..94f4268998 100644 --- a/esapi/api.cluster.put_component_template.go +++ b/esapi/api.cluster.put_component_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.put_settings.go b/esapi/api.cluster.put_settings.go index 0f9b9c74d9..2f76212b9a 100644 --- a/esapi/api.cluster.put_settings.go +++ b/esapi/api.cluster.put_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.remote_info.go b/esapi/api.cluster.remote_info.go index 2f46cceafd..436caf3016 100644 --- a/esapi/api.cluster.remote_info.go +++ b/esapi/api.cluster.remote_info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.reroute.go b/esapi/api.cluster.reroute.go index d2487f1af6..45b645adeb 100644 --- a/esapi/api.cluster.reroute.go +++ b/esapi/api.cluster.reroute.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.state.go b/esapi/api.cluster.state.go index dd219a7c91..f4505c705b 100644 --- a/esapi/api.cluster.state.go +++ b/esapi/api.cluster.state.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.cluster.stats.go b/esapi/api.cluster.stats.go index 4c4146c522..2f06a16ef6 100644 --- a/esapi/api.cluster.stats.go +++ b/esapi/api.cluster.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -53,8 +53,8 @@ type ClusterStats func(o ...func(*ClusterStatsRequest)) (*Response, error) type ClusterStatsRequest struct { NodeID []string - FlatSettings *bool - Timeout time.Duration + IncludeRemotes *bool + Timeout time.Duration Pretty bool Human bool @@ -101,8 +101,8 @@ func (r ClusterStatsRequest) Do(providedCtx context.Context, transport Transport params = make(map[string]string) - if r.FlatSettings != nil { - params["flat_settings"] = strconv.FormatBool(*r.FlatSettings) + if r.IncludeRemotes != nil { + params["include_remotes"] = strconv.FormatBool(*r.IncludeRemotes) } if r.Timeout != 0 { @@ -194,10 +194,10 @@ func (f ClusterStats) WithNodeID(v ...string) func(*ClusterStatsRequest) { } } -// WithFlatSettings - return settings in flat format (default: false). -func (f ClusterStats) WithFlatSettings(v bool) func(*ClusterStatsRequest) { +// WithIncludeRemotes - include remote cluster data into the response (default: false). +func (f ClusterStats) WithIncludeRemotes(v bool) func(*ClusterStatsRequest) { return func(r *ClusterStatsRequest) { - r.FlatSettings = &v + r.IncludeRemotes = &v } } diff --git a/esapi/api.connector.check_in.go b/esapi/api.connector.check_in.go index 32f435fb91..0e04a1fbe6 100644 --- a/esapi/api.connector.check_in.go +++ b/esapi/api.connector.check_in.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.delete.go b/esapi/api.connector.delete.go index a4749cb87e..a08f91833a 100644 --- a/esapi/api.connector.delete.go +++ b/esapi/api.connector.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.get.go b/esapi/api.connector.get.go index 0a2a6081a9..6c13364fc1 100644 --- a/esapi/api.connector.get.go +++ b/esapi/api.connector.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.last_sync.go b/esapi/api.connector.last_sync.go index ce07ab1325..74d2cec602 100644 --- a/esapi/api.connector.last_sync.go +++ b/esapi/api.connector.last_sync.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.list.go b/esapi/api.connector.list.go index d9d61f7999..2dcd459c27 100644 --- a/esapi/api.connector.list.go +++ b/esapi/api.connector.list.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.post.go b/esapi/api.connector.post.go index 1810ba76d5..35efd4f02d 100644 --- a/esapi/api.connector.post.go +++ b/esapi/api.connector.post.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.put.go b/esapi/api.connector.put.go index c710e831ae..13c86d82c6 100644 --- a/esapi/api.connector.put.go +++ b/esapi/api.connector.put.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.secret_delete.go b/esapi/api.connector.secret_delete.go index dd877e4f49..96de5dfcfd 100644 --- a/esapi/api.connector.secret_delete.go +++ b/esapi/api.connector.secret_delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.secret_get.go b/esapi/api.connector.secret_get.go index a860294d7a..a616b57922 100644 --- a/esapi/api.connector.secret_get.go +++ b/esapi/api.connector.secret_get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.secret_post.go b/esapi/api.connector.secret_post.go index fe1dc7c16d..3b9c880b9e 100644 --- a/esapi/api.connector.secret_post.go +++ b/esapi/api.connector.secret_post.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.secret_put.go b/esapi/api.connector.secret_put.go index 563432118a..4fa9b05ae3 100644 --- a/esapi/api.connector.secret_put.go +++ b/esapi/api.connector.secret_put.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_cancel.go b/esapi/api.connector.sync_job_cancel.go index 78a1ce91a0..1a23abde1e 100644 --- a/esapi/api.connector.sync_job_cancel.go +++ b/esapi/api.connector.sync_job_cancel.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_check_in.go b/esapi/api.connector.sync_job_check_in.go index e75b083acb..ed8bc7678c 100644 --- a/esapi/api.connector.sync_job_check_in.go +++ b/esapi/api.connector.sync_job_check_in.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_claim.go b/esapi/api.connector.sync_job_claim.go index 10a0e26422..5175933b22 100644 --- a/esapi/api.connector.sync_job_claim.go +++ b/esapi/api.connector.sync_job_claim.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_delete.go b/esapi/api.connector.sync_job_delete.go index c22be45922..37e6b2bda1 100644 --- a/esapi/api.connector.sync_job_delete.go +++ b/esapi/api.connector.sync_job_delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_error.go b/esapi/api.connector.sync_job_error.go index 9e71bdc7b5..114f8d04fa 100644 --- a/esapi/api.connector.sync_job_error.go +++ b/esapi/api.connector.sync_job_error.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_get.go b/esapi/api.connector.sync_job_get.go index 7db21eef5f..5606558ca6 100644 --- a/esapi/api.connector.sync_job_get.go +++ b/esapi/api.connector.sync_job_get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_list.go b/esapi/api.connector.sync_job_list.go index 508206e768..06a315f51c 100644 --- a/esapi/api.connector.sync_job_list.go +++ b/esapi/api.connector.sync_job_list.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_post.go b/esapi/api.connector.sync_job_post.go index 97bf8f023d..61b05aeb0e 100644 --- a/esapi/api.connector.sync_job_post.go +++ b/esapi/api.connector.sync_job_post.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.sync_job_update_stats.go b/esapi/api.connector.sync_job_update_stats.go index a4e5791280..2a70994ba7 100644 --- a/esapi/api.connector.sync_job_update_stats.go +++ b/esapi/api.connector.sync_job_update_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_active_filtering.go b/esapi/api.connector.update_active_filtering.go index af425f2c52..1dc36eb84d 100644 --- a/esapi/api.connector.update_active_filtering.go +++ b/esapi/api.connector.update_active_filtering.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_api_key_id.go b/esapi/api.connector.update_api_key_id.go index 4fe903e2a0..e9499fb2a1 100644 --- a/esapi/api.connector.update_api_key_id.go +++ b/esapi/api.connector.update_api_key_id.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_configuration.go b/esapi/api.connector.update_configuration.go index 667e0683f4..15321947e3 100644 --- a/esapi/api.connector.update_configuration.go +++ b/esapi/api.connector.update_configuration.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_error.go b/esapi/api.connector.update_error.go index 68ab7432bf..d947daa54b 100644 --- a/esapi/api.connector.update_error.go +++ b/esapi/api.connector.update_error.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_features.go b/esapi/api.connector.update_features.go index a645dc7741..8cbe41c426 100644 --- a/esapi/api.connector.update_features.go +++ b/esapi/api.connector.update_features.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_filtering.go b/esapi/api.connector.update_filtering.go index 3a96096b80..fa9d8607e1 100644 --- a/esapi/api.connector.update_filtering.go +++ b/esapi/api.connector.update_filtering.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_filtering_validation.go b/esapi/api.connector.update_filtering_validation.go index 5bbb9d1126..7878a745e1 100644 --- a/esapi/api.connector.update_filtering_validation.go +++ b/esapi/api.connector.update_filtering_validation.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_index_name.go b/esapi/api.connector.update_index_name.go index 8cf47b74c5..ed42939b2b 100644 --- a/esapi/api.connector.update_index_name.go +++ b/esapi/api.connector.update_index_name.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_name.go b/esapi/api.connector.update_name.go index c540c2b407..1cdb4da464 100644 --- a/esapi/api.connector.update_name.go +++ b/esapi/api.connector.update_name.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_native.go b/esapi/api.connector.update_native.go index dfbd7aaeaf..388ee96edf 100644 --- a/esapi/api.connector.update_native.go +++ b/esapi/api.connector.update_native.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_pipeline.go b/esapi/api.connector.update_pipeline.go index dbc5da91b4..8b68ee48e7 100644 --- a/esapi/api.connector.update_pipeline.go +++ b/esapi/api.connector.update_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_scheduling.go b/esapi/api.connector.update_scheduling.go index 197ac84c83..3304602051 100644 --- a/esapi/api.connector.update_scheduling.go +++ b/esapi/api.connector.update_scheduling.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_service_type.go b/esapi/api.connector.update_service_type.go index a3ea51318e..946ccaca9b 100644 --- a/esapi/api.connector.update_service_type.go +++ b/esapi/api.connector.update_service_type.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.connector.update_status.go b/esapi/api.connector.update_status.go index a172bfdbba..17e6287a93 100644 --- a/esapi/api.connector.update_status.go +++ b/esapi/api.connector.update_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.count.go b/esapi/api.count.go index cbbf22707d..000eb4a076 100644 --- a/esapi/api.count.go +++ b/esapi/api.count.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.create.go b/esapi/api.create.go index 5205b3b04b..c2f9aaa8bd 100644 --- a/esapi/api.create.go +++ b/esapi/api.create.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -59,13 +59,14 @@ type CreateRequest struct { Body io.Reader - Pipeline string - Refresh string - Routing string - Timeout time.Duration - Version *int - VersionType string - WaitForActiveShards string + IncludeSourceOnError *bool + Pipeline string + Refresh string + Routing string + Timeout time.Duration + Version *int + VersionType string + WaitForActiveShards string Pretty bool Human bool @@ -115,6 +116,10 @@ func (r CreateRequest) Do(providedCtx context.Context, transport Transport) (*Re params = make(map[string]string) + if r.IncludeSourceOnError != nil { + params["include_source_on_error"] = strconv.FormatBool(*r.IncludeSourceOnError) + } + if r.Pipeline != "" { params["pipeline"] = r.Pipeline } @@ -228,6 +233,13 @@ func (f Create) WithContext(v context.Context) func(*CreateRequest) { } } +// WithIncludeSourceOnError - true or false if to include the document source in the error message in case of parsing errors. defaults to true.. 
+func (f Create) WithIncludeSourceOnError(v bool) func(*CreateRequest) { + return func(r *CreateRequest) { + r.IncludeSourceOnError = &v + } +} + // WithPipeline - the pipeline ID to preprocess incoming documents with. func (f Create) WithPipeline(v string) func(*CreateRequest) { return func(r *CreateRequest) { diff --git a/esapi/api.dangling_indices.delete_dangling_index.go b/esapi/api.dangling_indices.delete_dangling_index.go index 2fb1275f44..6251d2c50b 100644 --- a/esapi/api.dangling_indices.delete_dangling_index.go +++ b/esapi/api.dangling_indices.delete_dangling_index.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.dangling_indices.import_dangling_index.go b/esapi/api.dangling_indices.import_dangling_index.go index 8b1bae064f..8490189a5d 100644 --- a/esapi/api.dangling_indices.import_dangling_index.go +++ b/esapi/api.dangling_indices.import_dangling_index.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.dangling_indices.list_dangling_indices.go b/esapi/api.dangling_indices.list_dangling_indices.go index 2c7daf7395..82352fc73c 100644 --- a/esapi/api.dangling_indices.list_dangling_indices.go +++ b/esapi/api.dangling_indices.list_dangling_indices.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.delete.go b/esapi/api.delete.go index b6d3de072c..b8b8ff0887 100644 --- a/esapi/api.delete.go +++ b/esapi/api.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.delete_by_query.go b/esapi/api.delete_by_query.go index da43f04daa..0aefc35c6f 100644 --- a/esapi/api.delete_by_query.go +++ b/esapi/api.delete_by_query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.delete_by_query_rethrottle.go b/esapi/api.delete_by_query_rethrottle.go index d1441e6683..281980e42e 100644 --- a/esapi/api.delete_by_query_rethrottle.go +++ b/esapi/api.delete_by_query_rethrottle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.delete_script.go b/esapi/api.delete_script.go index 7f22136968..165607dead 100644 --- a/esapi/api.delete_script.go +++ b/esapi/api.delete_script.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.exists.go b/esapi/api.exists.go index 6ce32ba5a7..55263c7ee0 100644 --- a/esapi/api.exists.go +++ b/esapi/api.exists.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.exists_source.go b/esapi/api.exists_source.go index 3199ebb08f..5d8d6afc33 100644 --- a/esapi/api.exists_source.go +++ b/esapi/api.exists_source.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.explain.go b/esapi/api.explain.go index a63bffc1e1..319caa2e03 100644 --- a/esapi/api.explain.go +++ b/esapi/api.explain.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.features.get_features.go b/esapi/api.features.get_features.go index 055f83c274..72226dace4 100644 --- a/esapi/api.features.get_features.go +++ b/esapi/api.features.get_features.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.features.reset_features.go b/esapi/api.features.reset_features.go index fcce6e3695..8a19d74739 100644 --- a/esapi/api.features.reset_features.go +++ b/esapi/api.features.reset_features.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.field_caps.go b/esapi/api.field_caps.go index 1de29546ac..ce840738c9 100644 --- a/esapi/api.field_caps.go +++ b/esapi/api.field_caps.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.fleet.delete_secret.go b/esapi/api.fleet.delete_secret.go index 496249a517..87dff14b29 100644 --- a/esapi/api.fleet.delete_secret.go +++ b/esapi/api.fleet.delete_secret.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.fleet.get_secret.go b/esapi/api.fleet.get_secret.go index 0ed9da0fb2..e6b9201a3d 100644 --- a/esapi/api.fleet.get_secret.go +++ b/esapi/api.fleet.get_secret.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.fleet.global_checkpoints.go b/esapi/api.fleet.global_checkpoints.go index cf9ab110ea..fcb5aedef7 100644 --- a/esapi/api.fleet.global_checkpoints.go +++ b/esapi/api.fleet.global_checkpoints.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.fleet.msearch.go b/esapi/api.fleet.msearch.go index a848d1ef92..e1d7e5e0be 100644 --- a/esapi/api.fleet.msearch.go +++ b/esapi/api.fleet.msearch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.fleet.post_secret.go b/esapi/api.fleet.post_secret.go index 1aefeaef20..c6ae9a669e 100644 --- a/esapi/api.fleet.post_secret.go +++ b/esapi/api.fleet.post_secret.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.fleet.search.go b/esapi/api.fleet.search.go index b9764f73ed..44885f6130 100644 --- a/esapi/api.fleet.search.go +++ b/esapi/api.fleet.search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.get.go b/esapi/api.get.go index 561ab84669..402d00eede 100644 --- a/esapi/api.get.go +++ b/esapi/api.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.get_script.go b/esapi/api.get_script.go index abbb0ea08c..5bf3e73da7 100644 --- a/esapi/api.get_script.go +++ b/esapi/api.get_script.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.get_script_context.go b/esapi/api.get_script_context.go index 58add2132d..05daedd006 100644 --- a/esapi/api.get_script_context.go +++ b/esapi/api.get_script_context.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.get_script_languages.go b/esapi/api.get_script_languages.go index 6111542e00..c109b8640b 100644 --- a/esapi/api.get_script_languages.go +++ b/esapi/api.get_script_languages.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.get_source.go b/esapi/api.get_source.go index 36fa4fd7a3..17b8660886 100644 --- a/esapi/api.get_source.go +++ b/esapi/api.get_source.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.health_report.go b/esapi/api.health_report.go index 276602a3d4..dcb48d290c 100644 --- a/esapi/api.health_report.go +++ b/esapi/api.health_report.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.index.go b/esapi/api.index.go index 50f7b60517..c05f4002d3 100644 --- a/esapi/api.index.go +++ b/esapi/api.index.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -57,18 +57,19 @@ type IndexRequest struct { Body io.Reader - IfPrimaryTerm *int - IfSeqNo *int - OpType string - Pipeline string - Refresh string - RequireAlias *bool - RequireDataStream *bool - Routing string - Timeout time.Duration - Version *int - VersionType string - WaitForActiveShards string + IfPrimaryTerm *int + IfSeqNo *int + IncludeSourceOnError *bool + OpType string + Pipeline string + Refresh string + RequireAlias *bool + RequireDataStream *bool + Routing string + Timeout time.Duration + Version *int + VersionType string + WaitForActiveShards string Pretty bool Human bool @@ -132,6 +133,10 @@ func (r IndexRequest) Do(providedCtx context.Context, transport Transport) (*Res params["if_seq_no"] = strconv.FormatInt(int64(*r.IfSeqNo), 10) } + if r.IncludeSourceOnError != nil { + params["include_source_on_error"] = strconv.FormatBool(*r.IncludeSourceOnError) + } + if r.OpType != "" { params["op_type"] = r.OpType } @@ -278,6 +283,13 @@ func (f Index) WithIfSeqNo(v int) func(*IndexRequest) { } } +// WithIncludeSourceOnError - true or false if to include the document source in the error message in case of parsing errors. defaults to true.. +func (f Index) WithIncludeSourceOnError(v bool) func(*IndexRequest) { + return func(r *IndexRequest) { + r.IncludeSourceOnError = &v + } +} + // WithOpType - explicit operation type. defaults to `index` for requests with an explicit document ID, and to `create`for requests without an explicit document ID. func (f Index) WithOpType(v string) func(*IndexRequest) { return func(r *IndexRequest) { diff --git a/esapi/api.indices.add_block.go b/esapi/api.indices.add_block.go index 4297642f49..ad5acea5cc 100644 --- a/esapi/api.indices.add_block.go +++ b/esapi/api.indices.add_block.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.analyze.go b/esapi/api.indices.analyze.go index accf120546..cff75046e8 100644 --- a/esapi/api.indices.analyze.go +++ b/esapi/api.indices.analyze.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.cancel_migrate_reindex.go b/esapi/api.indices.cancel_migrate_reindex.go new file mode 100644 index 0000000000..5f36fd7b9d --- /dev/null +++ b/esapi/api.indices.cancel_migrate_reindex.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+//
+// Code generated from specification version 8.18.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strings"
+)
+
+func newIndicesCancelMigrateReindexFunc(t Transport) IndicesCancelMigrateReindex {
+	return func(index string, o ...func(*IndicesCancelMigrateReindexRequest)) (*Response, error) {
+		var r = IndicesCancelMigrateReindexRequest{Index: index}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// IndicesCancelMigrateReindex this API cancels a migration reindex attempt for a data stream or index
+//
+// This API is experimental.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex-cancel-api.html.
+type IndicesCancelMigrateReindex func(index string, o ...func(*IndicesCancelMigrateReindexRequest)) (*Response, error)
+
+// IndicesCancelMigrateReindexRequest configures the Indices Cancel Migrate Reindex API request.
+type IndicesCancelMigrateReindexRequest struct {
+	Index string
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r IndicesCancelMigrateReindexRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.cancel_migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_migration") + 1 + len("reindex") + 1 + len(r.Index) + 1 + len("_cancel")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_cancel") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.cancel_migrate_reindex") + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + 
instrument.AfterRequest(req, "elasticsearch", "indices.cancel_migrate_reindex") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesCancelMigrateReindex) WithContext(v context.Context) func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesCancelMigrateReindex) WithPretty() func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesCancelMigrateReindex) WithHuman() func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesCancelMigrateReindex) WithErrorTrace() func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesCancelMigrateReindex) WithFilterPath(v ...string) func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesCancelMigrateReindex) WithHeader(h map[string]string) func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f IndicesCancelMigrateReindex) WithOpaqueID(s string) func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.indices.clear_cache.go b/esapi/api.indices.clear_cache.go index 20c931cbfb..5f9cf383a4 100644 --- a/esapi/api.indices.clear_cache.go +++ b/esapi/api.indices.clear_cache.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.clone.go b/esapi/api.indices.clone.go index 2754d3f989..174857e1de 100644 --- a/esapi/api.indices.clone.go +++ b/esapi/api.indices.clone.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.close.go b/esapi/api.indices.close.go index 3601cd94cd..12300240b3 100644 --- a/esapi/api.indices.close.go +++ b/esapi/api.indices.close.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.create.go b/esapi/api.indices.create.go index 4356d58eb4..812e023ffe 100644 --- a/esapi/api.indices.create.go +++ b/esapi/api.indices.create.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.create_from.go b/esapi/api.indices.create_from.go new file mode 100644 index 0000000000..f907a31b85 --- /dev/null +++ b/esapi/api.indices.create_from.go @@ -0,0 +1,248 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newIndicesCreateFromFunc(t Transport) IndicesCreateFrom { + return func(dest string, source string, o ...func(*IndicesCreateFromRequest)) (*Response, error) { + var r = IndicesCreateFromRequest{Dest: dest, Source: source} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesCreateFrom this API creates a destination from a source index. It copies the mappings and settings from the source index while allowing request settings and mappings to override the source values. 
+// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index-from-source.html. +type IndicesCreateFrom func(dest string, source string, o ...func(*IndicesCreateFromRequest)) (*Response, error) + +// IndicesCreateFromRequest configures the Indices Create From API request. +type IndicesCreateFromRequest struct { + Body io.Reader + + Dest string + Source string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesCreateFromRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.create_from") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_create_from") + 1 + len(r.Source) + 1 + len(r.Dest)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_create_from") + path.WriteString("/") + path.WriteString(r.Source) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "source", r.Source) + } + path.WriteString("/") + path.WriteString(r.Dest) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "dest", r.Dest) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := 
r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.create_from") + if reader := instrument.RecordRequestBody(ctx, "indices.create_from", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.create_from") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesCreateFrom) WithContext(v context.Context) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.ctx = v + } +} + +// WithBody - The body contains the fields `mappings_override`, `settings_override`, and `remove_index_blocks`.. +func (f IndicesCreateFrom) WithBody(v io.Reader) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f IndicesCreateFrom) WithPretty() func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesCreateFrom) WithHuman() func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesCreateFrom) WithErrorTrace() func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesCreateFrom) WithFilterPath(v ...string) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesCreateFrom) WithHeader(h map[string]string) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesCreateFrom) WithOpaqueID(s string) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.indices.delete.go b/esapi/api.indices.delete.go index 449da765c5..1d5385e13c 100644 --- a/esapi/api.indices.delete.go +++ b/esapi/api.indices.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.delete_alias.go b/esapi/api.indices.delete_alias.go index c59086b934..6e6458e2e7 100644 --- a/esapi/api.indices.delete_alias.go +++ b/esapi/api.indices.delete_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.delete_data_lifecycle.go b/esapi/api.indices.delete_data_lifecycle.go index 70f0bd18a1..8841d7fbce 100644 --- a/esapi/api.indices.delete_data_lifecycle.go +++ b/esapi/api.indices.delete_data_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -46,8 +46,6 @@ func newIndicesDeleteDataLifecycleFunc(t Transport) IndicesDeleteDataLifecycle { // IndicesDeleteDataLifecycle deletes the data stream lifecycle of the selected data streams. // -// This API is experimental. -// // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-delete-lifecycle.html. type IndicesDeleteDataLifecycle func(name []string, o ...func(*IndicesDeleteDataLifecycleRequest)) (*Response, error) diff --git a/esapi/api.indices.delete_index_template.go b/esapi/api.indices.delete_index_template.go index 5babf1c954..858ff019a5 100644 --- a/esapi/api.indices.delete_index_template.go +++ b/esapi/api.indices.delete_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.delete_template.go b/esapi/api.indices.delete_template.go index 6b7d5b4e70..89e06372e1 100644 --- a/esapi/api.indices.delete_template.go +++ b/esapi/api.indices.delete_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.disk_usage.go b/esapi/api.indices.disk_usage.go index e0a94adfa5..6d6997f6fe 100644 --- a/esapi/api.indices.disk_usage.go +++ b/esapi/api.indices.disk_usage.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.downsample.go b/esapi/api.indices.downsample.go index 3db0e6de86..d5a7497b21 100644 --- a/esapi/api.indices.downsample.go +++ b/esapi/api.indices.downsample.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.exists.go b/esapi/api.indices.exists.go index 3bd22065ca..b4b5449d5f 100644 --- a/esapi/api.indices.exists.go +++ b/esapi/api.indices.exists.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.exists_alias.go b/esapi/api.indices.exists_alias.go index 1dafdb9309..4dc4b49bb3 100644 --- a/esapi/api.indices.exists_alias.go +++ b/esapi/api.indices.exists_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.exists_index_template.go b/esapi/api.indices.exists_index_template.go index 211d8b4885..2364a4bf88 100644 --- a/esapi/api.indices.exists_index_template.go +++ b/esapi/api.indices.exists_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.exists_template.go b/esapi/api.indices.exists_template.go index 3dcdf54977..4742fb4bee 100644 --- a/esapi/api.indices.exists_template.go +++ b/esapi/api.indices.exists_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.explain_data_lifecycle.go b/esapi/api.indices.explain_data_lifecycle.go index 6b0df98cd3..1d0641ac37 100644 --- a/esapi/api.indices.explain_data_lifecycle.go +++ b/esapi/api.indices.explain_data_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -46,8 +46,6 @@ func newIndicesExplainDataLifecycleFunc(t Transport) IndicesExplainDataLifecycle // IndicesExplainDataLifecycle retrieves information about the index's current data stream lifecycle, such as any potential encountered error, time since creation etc. // -// This API is experimental. -// // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-explain-lifecycle.html. type IndicesExplainDataLifecycle func(index string, o ...func(*IndicesExplainDataLifecycleRequest)) (*Response, error) diff --git a/esapi/api.indices.field_usage_stats.go b/esapi/api.indices.field_usage_stats.go index 7c7083bb82..b1f9eb3bfa 100644 --- a/esapi/api.indices.field_usage_stats.go +++ b/esapi/api.indices.field_usage_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.flush.go b/esapi/api.indices.flush.go index 133a0dc0ba..3959bf0563 100644 --- a/esapi/api.indices.flush.go +++ b/esapi/api.indices.flush.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.forcemerge.go b/esapi/api.indices.forcemerge.go index 667e749707..a32754eef9 100644 --- a/esapi/api.indices.forcemerge.go +++ b/esapi/api.indices.forcemerge.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.get.go b/esapi/api.indices.get.go index 1ac65c0a7a..6a5ddd05c5 100644 --- a/esapi/api.indices.get.go +++ b/esapi/api.indices.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.get_alias.go b/esapi/api.indices.get_alias.go index f69d37f75f..0581b00a1e 100644 --- a/esapi/api.indices.get_alias.go +++ b/esapi/api.indices.get_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.get_data_lifecycle.go b/esapi/api.indices.get_data_lifecycle.go index 937964f14a..ee7552870f 100644 --- a/esapi/api.indices.get_data_lifecycle.go +++ b/esapi/api.indices.get_data_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -47,8 +47,6 @@ func newIndicesGetDataLifecycleFunc(t Transport) IndicesGetDataLifecycle { // IndicesGetDataLifecycle returns the data stream lifecycle of the selected data streams. // -// This API is experimental. -// // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle.html. 
type IndicesGetDataLifecycle func(name []string, o ...func(*IndicesGetDataLifecycleRequest)) (*Response, error) diff --git a/esapi/api.indices.get_data_lifecycle_stats.go b/esapi/api.indices.get_data_lifecycle_stats.go new file mode 100644 index 0000000000..70f401baf1 --- /dev/null +++ b/esapi/api.indices.get_data_lifecycle_stats.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIndicesGetDataLifecycleStatsFunc(t Transport) IndicesGetDataLifecycleStats { + return func(o ...func(*IndicesGetDataLifecycleStatsRequest)) (*Response, error) { + var r = IndicesGetDataLifecycleStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetDataLifecycleStats get data stream lifecycle statistics. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle-stats.html. 
+type IndicesGetDataLifecycleStats func(o ...func(*IndicesGetDataLifecycleStatsRequest)) (*Response, error) + +// IndicesGetDataLifecycleStatsRequest configures the Indices Get Data Lifecycle Stats API request. +type IndicesGetDataLifecycleStatsRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesGetDataLifecycleStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_lifecycle_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_lifecycle/stats")) + path.WriteString("http://") + path.WriteString("/_lifecycle/stats") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, 
"indices.get_data_lifecycle_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_lifecycle_stats") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetDataLifecycleStats) WithContext(v context.Context) func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetDataLifecycleStats) WithPretty() func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetDataLifecycleStats) WithHuman() func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetDataLifecycleStats) WithErrorTrace() func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGetDataLifecycleStats) WithFilterPath(v ...string) func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f IndicesGetDataLifecycleStats) WithHeader(h map[string]string) func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetDataLifecycleStats) WithOpaqueID(s string) func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.indices.get_field_mapping.go b/esapi/api.indices.get_field_mapping.go index d67cb2af50..d76a0be96b 100644 --- a/esapi/api.indices.get_field_mapping.go +++ b/esapi/api.indices.get_field_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.get_index_template.go b/esapi/api.indices.get_index_template.go index 7568269bec..593165dcd0 100644 --- a/esapi/api.indices.get_index_template.go +++ b/esapi/api.indices.get_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.get_mapping.go b/esapi/api.indices.get_mapping.go index 762d143f43..fde75293f0 100644 --- a/esapi/api.indices.get_mapping.go +++ b/esapi/api.indices.get_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.get_migrate_reindex_status.go b/esapi/api.indices.get_migrate_reindex_status.go new file mode 100644 index 0000000000..e3834dc722 --- /dev/null +++ b/esapi/api.indices.get_migrate_reindex_status.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIndicesGetMigrateReindexStatusFunc(t Transport) IndicesGetMigrateReindexStatus { + return func(index string, o ...func(*IndicesGetMigrateReindexStatusRequest)) (*Response, error) { + var r = IndicesGetMigrateReindexStatusRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetMigrateReindexStatus this API returns the status of a migration reindex attempt for a data stream or index +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex-status-api.html. +type IndicesGetMigrateReindexStatus func(index string, o ...func(*IndicesGetMigrateReindexStatusRequest)) (*Response, error) + +// IndicesGetMigrateReindexStatusRequest configures the Indices Get Migrate Reindex Status API request. +type IndicesGetMigrateReindexStatusRequest struct { + Index string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesGetMigrateReindexStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_migrate_reindex_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_migration") + 1 + len("reindex") + 1 + len(r.Index) + 1 + len("_status")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_status") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_migrate_reindex_status") + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); 
ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_migrate_reindex_status") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetMigrateReindexStatus) WithContext(v context.Context) func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetMigrateReindexStatus) WithPretty() func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetMigrateReindexStatus) WithHuman() func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetMigrateReindexStatus) WithErrorTrace() func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGetMigrateReindexStatus) WithFilterPath(v ...string) func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f IndicesGetMigrateReindexStatus) WithHeader(h map[string]string) func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetMigrateReindexStatus) WithOpaqueID(s string) func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.indices.get_settings.go b/esapi/api.indices.get_settings.go index 08c36a63cc..3cf35ec31a 100644 --- a/esapi/api.indices.get_settings.go +++ b/esapi/api.indices.get_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.get_template.go b/esapi/api.indices.get_template.go index 08772e5bfe..fc1cdcfc61 100644 --- a/esapi/api.indices.get_template.go +++ b/esapi/api.indices.get_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.migrate_reindex.go b/esapi/api.indices.migrate_reindex.go new file mode 100644 index 0000000000..d27cbe3914 --- /dev/null +++ b/esapi/api.indices.migrate_reindex.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newIndicesMigrateReindexFunc(t Transport) IndicesMigrateReindex { + return func(body io.Reader, o ...func(*IndicesMigrateReindexRequest)) (*Response, error) { + var r = IndicesMigrateReindexRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesMigrateReindex this API reindexes all legacy backing indices for a data stream. It does this in a persistent task. The persistent task id is returned immediately, and the reindexing work is completed in that task +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex-api.html. +type IndicesMigrateReindex func(body io.Reader, o ...func(*IndicesMigrateReindexRequest)) (*Response, error) + +// IndicesMigrateReindexRequest configures the Indices Migrate Reindex API request. 
+type IndicesMigrateReindexRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesMigrateReindexRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_migration/reindex")) + path.WriteString("http://") + path.WriteString("/_migration/reindex") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.migrate_reindex") + if reader := instrument.RecordRequestBody(ctx, 
"indices.migrate_reindex", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.migrate_reindex") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesMigrateReindex) WithContext(v context.Context) func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesMigrateReindex) WithPretty() func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesMigrateReindex) WithHuman() func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesMigrateReindex) WithErrorTrace() func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesMigrateReindex) WithFilterPath(v ...string) func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f IndicesMigrateReindex) WithHeader(h map[string]string) func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesMigrateReindex) WithOpaqueID(s string) func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.indices.modify_data_stream.go b/esapi/api.indices.modify_data_stream.go index e60f2e2cde..89f258cfb7 100644 --- a/esapi/api.indices.modify_data_stream.go +++ b/esapi/api.indices.modify_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.open.go b/esapi/api.indices.open.go index 30120936f8..303f30add0 100644 --- a/esapi/api.indices.open.go +++ b/esapi/api.indices.open.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.put_alias.go b/esapi/api.indices.put_alias.go index 064779b444..a57e287beb 100644 --- a/esapi/api.indices.put_alias.go +++ b/esapi/api.indices.put_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.put_data_lifecycle.go b/esapi/api.indices.put_data_lifecycle.go index 0417453401..4f5d81719f 100644 --- a/esapi/api.indices.put_data_lifecycle.go +++ b/esapi/api.indices.put_data_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -47,8 +47,6 @@ func newIndicesPutDataLifecycleFunc(t Transport) IndicesPutDataLifecycle { // IndicesPutDataLifecycle updates the data stream lifecycle of the selected data streams. // -// This API is experimental. -// // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-put-lifecycle.html. type IndicesPutDataLifecycle func(name []string, o ...func(*IndicesPutDataLifecycleRequest)) (*Response, error) diff --git a/esapi/api.indices.put_index_template.go b/esapi/api.indices.put_index_template.go index ecc4fbf408..42cab28fd8 100644 --- a/esapi/api.indices.put_index_template.go +++ b/esapi/api.indices.put_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.put_mapping.go b/esapi/api.indices.put_mapping.go index 93c88c1571..16557245ef 100644 --- a/esapi/api.indices.put_mapping.go +++ b/esapi/api.indices.put_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.put_settings.go b/esapi/api.indices.put_settings.go index be486699ff..2e83a24018 100644 --- a/esapi/api.indices.put_settings.go +++ b/esapi/api.indices.put_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.put_template.go b/esapi/api.indices.put_template.go index 9a074077a5..82ab9dd021 100644 --- a/esapi/api.indices.put_template.go +++ b/esapi/api.indices.put_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.recovery.go b/esapi/api.indices.recovery.go index 02a157974e..e1e1c45777 100644 --- a/esapi/api.indices.recovery.go +++ b/esapi/api.indices.recovery.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.refresh.go b/esapi/api.indices.refresh.go index 611e283f7c..81714c3de0 100644 --- a/esapi/api.indices.refresh.go +++ b/esapi/api.indices.refresh.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.resolve_cluster.go b/esapi/api.indices.resolve_cluster.go index 582e1275d1..376904f864 100644 --- a/esapi/api.indices.resolve_cluster.go +++ b/esapi/api.indices.resolve_cluster.go @@ -15,21 +15,21 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi import ( "context" - "errors" "net/http" "strconv" "strings" + "time" ) func newIndicesResolveClusterFunc(t Transport) IndicesResolveCluster { - return func(name []string, o ...func(*IndicesResolveClusterRequest)) (*Response, error) { - var r = IndicesResolveClusterRequest{Name: name} + return func(o ...func(*IndicesResolveClusterRequest)) (*Response, error) { + var r = IndicesResolveClusterRequest{} for _, f := range o { f(&r) } @@ -44,10 +44,10 @@ func newIndicesResolveClusterFunc(t Transport) IndicesResolveCluster { // ----- API Definition ------------------------------------------------------- -// IndicesResolveCluster resolves the specified index expressions to return information about each cluster, including the local cluster, if included. +// IndicesResolveCluster resolves the specified index expressions to return information about each cluster. If no index expression is provided, this endpoint will return information about all the remote clusters that are configured on the local cluster. // // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html. 
-type IndicesResolveCluster func(name []string, o ...func(*IndicesResolveClusterRequest)) (*Response, error) +type IndicesResolveCluster func(o ...func(*IndicesResolveClusterRequest)) (*Response, error) // IndicesResolveClusterRequest configures the Indices Resolve Cluster API request. type IndicesResolveClusterRequest struct { @@ -57,6 +57,7 @@ type IndicesResolveClusterRequest struct { ExpandWildcards string IgnoreThrottled *bool IgnoreUnavailable *bool + Timeout time.Duration Pretty bool Human bool @@ -89,20 +90,18 @@ func (r IndicesResolveClusterRequest) Do(providedCtx context.Context, transport method = "GET" - if len(r.Name) == 0 { - return nil, errors.New("name is required and cannot be nil or empty") - } - path.Grow(7 + 1 + len("_resolve") + 1 + len("cluster") + 1 + len(strings.Join(r.Name, ","))) path.WriteString("http://") path.WriteString("/") path.WriteString("_resolve") path.WriteString("/") path.WriteString("cluster") - path.WriteString("/") - path.WriteString(strings.Join(r.Name, ",")) - if instrument, ok := r.instrument.(Instrumentation); ok { - instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + if len(r.Name) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } } params = make(map[string]string) @@ -123,6 +122,10 @@ func (r IndicesResolveClusterRequest) Do(providedCtx context.Context, transport params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) } + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -201,34 +204,48 @@ func (f IndicesResolveCluster) WithContext(v context.Context) func(*IndicesResol } } -// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). 
+// WithName - a list of cluster:index names or wildcard expressions. +func (f IndicesResolveCluster) WithName(v ...string) func(*IndicesResolveClusterRequest) { + return func(r *IndicesResolveClusterRequest) { + r.Name = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). only allowed when providing an index expression.. func (f IndicesResolveCluster) WithAllowNoIndices(v bool) func(*IndicesResolveClusterRequest) { return func(r *IndicesResolveClusterRequest) { r.AllowNoIndices = &v } } -// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). only allowed when providing an index expression.. func (f IndicesResolveCluster) WithExpandWildcards(v string) func(*IndicesResolveClusterRequest) { return func(r *IndicesResolveClusterRequest) { r.ExpandWildcards = v } } -// WithIgnoreThrottled - whether specified concrete, expanded or aliased indices should be ignored when throttled. +// WithIgnoreThrottled - whether specified concrete, expanded or aliased indices should be ignored when throttled. only allowed when providing an index expression.. func (f IndicesResolveCluster) WithIgnoreThrottled(v bool) func(*IndicesResolveClusterRequest) { return func(r *IndicesResolveClusterRequest) { r.IgnoreThrottled = &v } } -// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). only allowed when providing an index expression.. 
func (f IndicesResolveCluster) WithIgnoreUnavailable(v bool) func(*IndicesResolveClusterRequest) { return func(r *IndicesResolveClusterRequest) { r.IgnoreUnavailable = &v } } +// WithTimeout - the maximum time to wait for remote clusters to respond. +func (f IndicesResolveCluster) WithTimeout(v time.Duration) func(*IndicesResolveClusterRequest) { + return func(r *IndicesResolveClusterRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f IndicesResolveCluster) WithPretty() func(*IndicesResolveClusterRequest) { return func(r *IndicesResolveClusterRequest) { diff --git a/esapi/api.indices.resolve_index.go b/esapi/api.indices.resolve_index.go index a29123bb4f..7fbaa19c44 100644 --- a/esapi/api.indices.resolve_index.go +++ b/esapi/api.indices.resolve_index.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "errors" "net/http" + "strconv" "strings" ) @@ -52,7 +53,9 @@ type IndicesResolveIndex func(name []string, o ...func(*IndicesResolveIndexReque type IndicesResolveIndexRequest struct { Name []string - ExpandWildcards string + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool Pretty bool Human bool @@ -103,10 +106,18 @@ func (r IndicesResolveIndexRequest) Do(providedCtx context.Context, transport Tr params = make(map[string]string) + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + if r.ExpandWildcards != "" { params["expand_wildcards"] = r.ExpandWildcards } + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + if r.Pretty { params["pretty"] = "true" } @@ -185,6 +196,13 @@ func (f IndicesResolveIndex) WithContext(v context.Context) func(*IndicesResolve } } +// 
WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesResolveIndex) WithAllowNoIndices(v bool) func(*IndicesResolveIndexRequest) { + return func(r *IndicesResolveIndexRequest) { + r.AllowNoIndices = &v + } +} + // WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). func (f IndicesResolveIndex) WithExpandWildcards(v string) func(*IndicesResolveIndexRequest) { return func(r *IndicesResolveIndexRequest) { @@ -192,6 +210,13 @@ func (f IndicesResolveIndex) WithExpandWildcards(v string) func(*IndicesResolveI } } +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesResolveIndex) WithIgnoreUnavailable(v bool) func(*IndicesResolveIndexRequest) { + return func(r *IndicesResolveIndexRequest) { + r.IgnoreUnavailable = &v + } +} + // WithPretty makes the response body pretty-printed. func (f IndicesResolveIndex) WithPretty() func(*IndicesResolveIndexRequest) { return func(r *IndicesResolveIndexRequest) { diff --git a/esapi/api.indices.rollover.go b/esapi/api.indices.rollover.go index c75eebe9f3..e07c087eae 100644 --- a/esapi/api.indices.rollover.go +++ b/esapi/api.indices.rollover.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -61,7 +61,6 @@ type IndicesRolloverRequest struct { DryRun *bool Lazy *bool MasterTimeout time.Duration - TargetFailureStore *bool Timeout time.Duration WaitForActiveShards string @@ -127,10 +126,6 @@ func (r IndicesRolloverRequest) Do(providedCtx context.Context, transport Transp params["master_timeout"] = formatDuration(r.MasterTimeout) } - if r.TargetFailureStore != nil { - params["target_failure_store"] = strconv.FormatBool(*r.TargetFailureStore) - } - if r.Timeout != 0 { params["timeout"] = formatDuration(r.Timeout) } @@ -259,13 +254,6 @@ func (f IndicesRollover) WithMasterTimeout(v time.Duration) func(*IndicesRollove } } -// WithTargetFailureStore - if set to true, the rollover action will be applied on the failure store of the data stream.. -func (f IndicesRollover) WithTargetFailureStore(v bool) func(*IndicesRolloverRequest) { - return func(r *IndicesRolloverRequest) { - r.TargetFailureStore = &v - } -} - // WithTimeout - explicit operation timeout. func (f IndicesRollover) WithTimeout(v time.Duration) func(*IndicesRolloverRequest) { return func(r *IndicesRolloverRequest) { diff --git a/esapi/api.indices.segments.go b/esapi/api.indices.segments.go index ecfe009bbe..170cac30ba 100644 --- a/esapi/api.indices.segments.go +++ b/esapi/api.indices.segments.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.shard_stores.go b/esapi/api.indices.shard_stores.go index 933a70dbbf..a054cae929 100644 --- a/esapi/api.indices.shard_stores.go +++ b/esapi/api.indices.shard_stores.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.shrink.go b/esapi/api.indices.shrink.go index cc9d992482..5901c070b4 100644 --- a/esapi/api.indices.shrink.go +++ b/esapi/api.indices.shrink.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.simulate_index_template.go b/esapi/api.indices.simulate_index_template.go index b7bf01084a..6d2eb13bac 100644 --- a/esapi/api.indices.simulate_index_template.go +++ b/esapi/api.indices.simulate_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.simulate_template.go b/esapi/api.indices.simulate_template.go index fcb6e977de..c9ab91a636 100644 --- a/esapi/api.indices.simulate_template.go +++ b/esapi/api.indices.simulate_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.split.go b/esapi/api.indices.split.go index 27a59c0bf6..d131e5181b 100644 --- a/esapi/api.indices.split.go +++ b/esapi/api.indices.split.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.stats.go b/esapi/api.indices.stats.go index e48590147e..f5f7e547e9 100644 --- a/esapi/api.indices.stats.go +++ b/esapi/api.indices.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.update_aliases.go b/esapi/api.indices.update_aliases.go index 8a62abdf2c..a201d0cca6 100644 --- a/esapi/api.indices.update_aliases.go +++ b/esapi/api.indices.update_aliases.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.indices.validate_query.go b/esapi/api.indices.validate_query.go index 2410ac43c3..c3b2534957 100644 --- a/esapi/api.indices.validate_query.go +++ b/esapi/api.indices.validate_query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.inference.chat_completion_unified.go b/esapi/api.inference.chat_completion_unified.go new file mode 100644 index 0000000000..b8a61f7c47 --- /dev/null +++ b/esapi/api.inference.chat_completion_unified.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceChatCompletionUnifiedFunc(t Transport) InferenceChatCompletionUnified { + return func(inference_id string, o ...func(*InferenceChatCompletionUnifiedRequest)) (*Response, error) { + var r = InferenceChatCompletionUnifiedRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceChatCompletionUnified perform chat completion inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/chat-completion-inference.html. +type InferenceChatCompletionUnified func(inference_id string, o ...func(*InferenceChatCompletionUnifiedRequest)) (*Response, error) + +// InferenceChatCompletionUnifiedRequest configures the Inference Chat Completion Unified API request. 
+type InferenceChatCompletionUnifiedRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferenceChatCompletionUnifiedRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.chat_completion_unified") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("chat_completion") + 1 + len(r.InferenceID) + 1 + len("_stream")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("chat_completion") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + path.WriteString("/") + path.WriteString("_stream") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header 
{ + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.chat_completion_unified") + if reader := instrument.RecordRequestBody(ctx, "inference.chat_completion_unified", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.chat_completion_unified") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceChatCompletionUnified) WithContext(v context.Context) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceChatCompletionUnified) WithBody(v io.Reader) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceChatCompletionUnified) WithPretty() func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceChatCompletionUnified) WithHuman() func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f InferenceChatCompletionUnified) WithErrorTrace() func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceChatCompletionUnified) WithFilterPath(v ...string) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceChatCompletionUnified) WithHeader(h map[string]string) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceChatCompletionUnified) WithOpaqueID(s string) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.inference.completion.go b/esapi/api.inference.completion.go new file mode 100644 index 0000000000..f2c6a6d759 --- /dev/null +++ b/esapi/api.inference.completion.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceCompletionFunc(t Transport) InferenceCompletion { + return func(inference_id string, o ...func(*InferenceCompletionRequest)) (*Response, error) { + var r = InferenceCompletionRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceCompletion perform completion inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceCompletion func(inference_id string, o ...func(*InferenceCompletionRequest)) (*Response, error) + +// InferenceCompletionRequest configures the Inference Completion API request. +type InferenceCompletionRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferenceCompletionRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.completion") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("completion") + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("completion") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.completion") + if reader := instrument.RecordRequestBody(ctx, 
"inference.completion", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.completion") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceCompletion) WithContext(v context.Context) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceCompletion) WithBody(v io.Reader) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceCompletion) WithPretty() func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceCompletion) WithHuman() func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceCompletion) WithErrorTrace() func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceCompletion) WithFilterPath(v ...string) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f InferenceCompletion) WithHeader(h map[string]string) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceCompletion) WithOpaqueID(s string) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.inference.delete.go b/esapi/api.inference.delete.go index 73828f2513..5539f1ebb4 100644 --- a/esapi/api.inference.delete.go +++ b/esapi/api.inference.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -45,8 +45,6 @@ func newInferenceDeleteFunc(t Transport) InferenceDelete { // InferenceDelete delete an inference endpoint // -// This API is experimental. -// // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html. type InferenceDelete func(inference_id string, o ...func(*InferenceDeleteRequest)) (*Response, error) diff --git a/esapi/api.inference.get.go b/esapi/api.inference.get.go index ceb1673d7f..3f0a677b79 100644 --- a/esapi/api.inference.get.go +++ b/esapi/api.inference.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -44,8 +44,6 @@ func newInferenceGetFunc(t Transport) InferenceGet { // InferenceGet get an inference endpoint // -// This API is experimental. 
-// // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-inference-api.html. type InferenceGet func(o ...func(*InferenceGetRequest)) (*Response, error) diff --git a/esapi/api.inference.put.go b/esapi/api.inference.put.go index 7193a51065..514a0b069e 100644 --- a/esapi/api.inference.put.go +++ b/esapi/api.inference.put.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -45,8 +45,6 @@ func newInferencePutFunc(t Transport) InferencePut { // InferencePut configure an inference endpoint for use in the Inference API // -// This API is experimental. -// // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html. type InferencePut func(inference_id string, o ...func(*InferencePutRequest)) (*Response, error) diff --git a/esapi/api.inference.rerank.go b/esapi/api.inference.rerank.go new file mode 100644 index 0000000000..232a7f7056 --- /dev/null +++ b/esapi/api.inference.rerank.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceRerankFunc(t Transport) InferenceRerank { + return func(inference_id string, o ...func(*InferenceRerankRequest)) (*Response, error) { + var r = InferenceRerankRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceRerank perform reranking inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceRerank func(inference_id string, o ...func(*InferenceRerankRequest)) (*Response, error) + +// InferenceRerankRequest configures the Inference Rerank API request. +type InferenceRerankRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferenceRerankRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.rerank") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("rerank") + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("rerank") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.rerank") + if reader := instrument.RecordRequestBody(ctx, "inference.rerank", 
r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.rerank") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceRerank) WithContext(v context.Context) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceRerank) WithBody(v io.Reader) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceRerank) WithPretty() func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceRerank) WithHuman() func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceRerank) WithErrorTrace() func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceRerank) WithFilterPath(v ...string) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f InferenceRerank) WithHeader(h map[string]string) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceRerank) WithOpaqueID(s string) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.inference.sparse_embedding.go b/esapi/api.inference.sparse_embedding.go new file mode 100644 index 0000000000..1300482fa6 --- /dev/null +++ b/esapi/api.inference.sparse_embedding.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceSparseEmbeddingFunc(t Transport) InferenceSparseEmbedding { + return func(inference_id string, o ...func(*InferenceSparseEmbeddingRequest)) (*Response, error) { + var r = InferenceSparseEmbeddingRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceSparseEmbedding perform sparse embedding inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceSparseEmbedding func(inference_id string, o ...func(*InferenceSparseEmbeddingRequest)) (*Response, error) + +// InferenceSparseEmbeddingRequest configures the Inference Sparse Embedding API request. +type InferenceSparseEmbeddingRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferenceSparseEmbeddingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.sparse_embedding") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("sparse_embedding") + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("sparse_embedding") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.sparse_embedding") + if reader := 
instrument.RecordRequestBody(ctx, "inference.sparse_embedding", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.sparse_embedding") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceSparseEmbedding) WithContext(v context.Context) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceSparseEmbedding) WithBody(v io.Reader) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceSparseEmbedding) WithPretty() func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceSparseEmbedding) WithHuman() func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceSparseEmbedding) WithErrorTrace() func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceSparseEmbedding) WithFilterPath(v ...string) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f InferenceSparseEmbedding) WithHeader(h map[string]string) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceSparseEmbedding) WithOpaqueID(s string) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.inference.stream_completion.go b/esapi/api.inference.stream_completion.go new file mode 100644 index 0000000000..f23ce03fcf --- /dev/null +++ b/esapi/api.inference.stream_completion.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceStreamCompletionFunc(t Transport) InferenceStreamCompletion { + return func(inference_id string, o ...func(*InferenceStreamCompletionRequest)) (*Response, error) { + var r = InferenceStreamCompletionRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceStreamCompletion perform streaming completion inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-stream-inference-api.html. +type InferenceStreamCompletion func(inference_id string, o ...func(*InferenceStreamCompletionRequest)) (*Response, error) + +// InferenceStreamCompletionRequest configures the Inference Stream Completion API request. +type InferenceStreamCompletionRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferenceStreamCompletionRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.stream_completion") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("completion") + 1 + len(r.InferenceID) + 1 + len("_stream")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("completion") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + path.WriteString("/") + path.WriteString("_stream") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + 
instrument.BeforeRequest(req, "inference.stream_completion") + if reader := instrument.RecordRequestBody(ctx, "inference.stream_completion", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.stream_completion") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceStreamCompletion) WithContext(v context.Context) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceStreamCompletion) WithBody(v io.Reader) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceStreamCompletion) WithPretty() func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceStreamCompletion) WithHuman() func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceStreamCompletion) WithErrorTrace() func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f InferenceStreamCompletion) WithFilterPath(v ...string) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceStreamCompletion) WithHeader(h map[string]string) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceStreamCompletion) WithOpaqueID(s string) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.inference.text_embedding.go b/esapi/api.inference.text_embedding.go new file mode 100644 index 0000000000..65c72a81ea --- /dev/null +++ b/esapi/api.inference.text_embedding.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceTextEmbeddingFunc(t Transport) InferenceTextEmbedding { + return func(inference_id string, o ...func(*InferenceTextEmbeddingRequest)) (*Response, error) { + var r = InferenceTextEmbeddingRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceTextEmbedding perform text embedding inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceTextEmbedding func(inference_id string, o ...func(*InferenceTextEmbeddingRequest)) (*Response, error) + +// InferenceTextEmbeddingRequest configures the Inference Text Embedding API request. +type InferenceTextEmbeddingRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferenceTextEmbeddingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.text_embedding") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("text_embedding") + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("text_embedding") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.text_embedding") + if reader := 
instrument.RecordRequestBody(ctx, "inference.text_embedding", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.text_embedding") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceTextEmbedding) WithContext(v context.Context) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceTextEmbedding) WithBody(v io.Reader) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceTextEmbedding) WithPretty() func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceTextEmbedding) WithHuman() func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceTextEmbedding) WithErrorTrace() func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceTextEmbedding) WithFilterPath(v ...string) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f InferenceTextEmbedding) WithHeader(h map[string]string) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceTextEmbedding) WithOpaqueID(s string) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.inference.update.go b/esapi/api.inference.update.go new file mode 100644 index 0000000000..93bd2f938c --- /dev/null +++ b/esapi/api.inference.update.go @@ -0,0 +1,257 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceUpdateFunc(t Transport) InferenceUpdate { + return func(inference_id string, o ...func(*InferenceUpdateRequest)) (*Response, error) { + var r = InferenceUpdateRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceUpdate update inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-inference-api.html. +type InferenceUpdate func(inference_id string, o ...func(*InferenceUpdateRequest)) (*Response, error) + +// InferenceUpdateRequest configures the Inference Update API request. +type InferenceUpdateRequest struct { + Body io.Reader + + InferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferenceUpdateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.update") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.InferenceID) + 1 + len("_update")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + if r.TaskType != "" { + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + } + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + path.WriteString("/") + path.WriteString("_update") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if 
ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.update") + if reader := instrument.RecordRequestBody(ctx, "inference.update", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.update") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceUpdate) WithContext(v context.Context) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferenceUpdate) WithBody(v io.Reader) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.Body = v + } +} + +// WithTaskType - the task type. +func (f InferenceUpdate) WithTaskType(v string) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.TaskType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceUpdate) WithPretty() func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceUpdate) WithHuman() func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceUpdate) WithErrorTrace() func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f InferenceUpdate) WithFilterPath(v ...string) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceUpdate) WithHeader(h map[string]string) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceUpdate) WithOpaqueID(s string) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.info.go b/esapi/api.info.go index 5ade00d083..4e9d63a9b4 100644 --- a/esapi/api.info.go +++ b/esapi/api.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.delete_geoip_database.go b/esapi/api.ingest.delete_geoip_database.go index ec3bcdf2bf..a207ab6b0e 100644 --- a/esapi/api.ingest.delete_geoip_database.go +++ b/esapi/api.ingest.delete_geoip_database.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -45,7 +45,7 @@ func newIngestDeleteGeoipDatabaseFunc(t Transport) IngestDeleteGeoipDatabase { // IngestDeleteGeoipDatabase deletes a geoip database configuration // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html. 
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-geoip-database-api.html. type IngestDeleteGeoipDatabase func(id []string, o ...func(*IngestDeleteGeoipDatabaseRequest)) (*Response, error) // IngestDeleteGeoipDatabaseRequest configures the Ingest Delete Geoip Database API request. diff --git a/esapi/api.ingest.delete_ip_location_database.go b/esapi/api.ingest.delete_ip_location_database.go new file mode 100644 index 0000000000..90bcac2149 --- /dev/null +++ b/esapi/api.ingest.delete_ip_location_database.go @@ -0,0 +1,232 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" +) + +func newIngestDeleteIPLocationDatabaseFunc(t Transport) IngestDeleteIPLocationDatabase { + return func(id []string, o ...func(*IngestDeleteIPLocationDatabaseRequest)) (*Response, error) { + var r = IngestDeleteIPLocationDatabaseRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestDeleteIPLocationDatabase deletes an ip location database configuration +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-ip-location-database-api.html. +type IngestDeleteIPLocationDatabase func(id []string, o ...func(*IngestDeleteIPLocationDatabaseRequest)) (*Response, error) + +// IngestDeleteIPLocationDatabaseRequest configures the Ingest DeleteIP Location Database API request. +type IngestDeleteIPLocationDatabaseRequest struct { + DocumentID []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IngestDeleteIPLocationDatabaseRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + if len(r.DocumentID) == 0 { + return nil, errors.New("id is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_ingest") + 1 + len("ip_location") + 1 + len("database") + 1 + len(strings.Join(r.DocumentID, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + path.WriteString(strings.Join(r.DocumentID, ",")) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", strings.Join(r.DocumentID, ",")) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := 
r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.delete_ip_location_database") + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.delete_ip_location_database") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestDeleteIPLocationDatabase) WithContext(v context.Context) func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IngestDeleteIPLocationDatabase) WithPretty() func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestDeleteIPLocationDatabase) WithHuman() func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IngestDeleteIPLocationDatabase) WithErrorTrace() func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestDeleteIPLocationDatabase) WithFilterPath(v ...string) func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f IngestDeleteIPLocationDatabase) WithHeader(h map[string]string) func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestDeleteIPLocationDatabase) WithOpaqueID(s string) func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.ingest.delete_pipeline.go b/esapi/api.ingest.delete_pipeline.go index 77dd8c0489..48d336586f 100644 --- a/esapi/api.ingest.delete_pipeline.go +++ b/esapi/api.ingest.delete_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.geo_ip_stats.go b/esapi/api.ingest.geo_ip_stats.go index 9165dec507..1978cb2e22 100644 --- a/esapi/api.ingest.geo_ip_stats.go +++ b/esapi/api.ingest.geo_ip_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.get_geoip_database.go b/esapi/api.ingest.get_geoip_database.go index 30a981223a..dd73a4bd8b 100644 --- a/esapi/api.ingest.get_geoip_database.go +++ b/esapi/api.ingest.get_geoip_database.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -44,7 +44,7 @@ func newIngestGetGeoipDatabaseFunc(t Transport) IngestGetGeoipDatabase { // IngestGetGeoipDatabase returns geoip database configuration. // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html. +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-geoip-database-api.html. type IngestGetGeoipDatabase func(o ...func(*IngestGetGeoipDatabaseRequest)) (*Response, error) // IngestGetGeoipDatabaseRequest configures the Ingest Get Geoip Database API request. diff --git a/esapi/api.ingest.get_ip_location_database.go b/esapi/api.ingest.get_ip_location_database.go new file mode 100644 index 0000000000..f3baacf19e --- /dev/null +++ b/esapi/api.ingest.get_ip_location_database.go @@ -0,0 +1,236 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIngestGetIPLocationDatabaseFunc(t Transport) IngestGetIPLocationDatabase { + return func(o ...func(*IngestGetIPLocationDatabaseRequest)) (*Response, error) { + var r = IngestGetIPLocationDatabaseRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestGetIPLocationDatabase returns the specified ip location database configuration +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ip-location-database-api.html. +type IngestGetIPLocationDatabase func(o ...func(*IngestGetIPLocationDatabaseRequest)) (*Response, error) + +// IngestGetIPLocationDatabaseRequest configures the Ingest GetIP Location Database API request. +type IngestGetIPLocationDatabaseRequest struct { + DocumentID []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IngestGetIPLocationDatabaseRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.get_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ingest") + 1 + len("ip_location") + 1 + len("database") + 1 + len(strings.Join(r.DocumentID, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + if len(r.DocumentID) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.DocumentID, ",")) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", strings.Join(r.DocumentID, ",")) + } + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.get_ip_location_database") 
+ } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.get_ip_location_database") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestGetIPLocationDatabase) WithContext(v context.Context) func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + r.ctx = v + } +} + +// WithDocumentID - a list of ip location database configurations to get; use `*` to get all ip location database configurations. +func (f IngestGetIPLocationDatabase) WithDocumentID(v ...string) func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + r.DocumentID = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IngestGetIPLocationDatabase) WithPretty() func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestGetIPLocationDatabase) WithHuman() func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IngestGetIPLocationDatabase) WithErrorTrace() func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f IngestGetIPLocationDatabase) WithFilterPath(v ...string) func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IngestGetIPLocationDatabase) WithHeader(h map[string]string) func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestGetIPLocationDatabase) WithOpaqueID(s string) func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.ingest.get_pipeline.go b/esapi/api.ingest.get_pipeline.go index 73e9727661..94469a28a6 100644 --- a/esapi/api.ingest.get_pipeline.go +++ b/esapi/api.ingest.get_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.processor_grok.go b/esapi/api.ingest.processor_grok.go index 809fef64c5..28d24296c4 100644 --- a/esapi/api.ingest.processor_grok.go +++ b/esapi/api.ingest.processor_grok.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.put_geoip_database.go b/esapi/api.ingest.put_geoip_database.go index 4907c71d7d..1dcd6d40e8 100644 --- a/esapi/api.ingest.put_geoip_database.go +++ b/esapi/api.ingest.put_geoip_database.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -45,7 +45,7 @@ func newIngestPutGeoipDatabaseFunc(t Transport) IngestPutGeoipDatabase { // IngestPutGeoipDatabase puts the configuration for a geoip database to be downloaded // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html. +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-geoip-database-api.html. type IngestPutGeoipDatabase func(id string, body io.Reader, o ...func(*IngestPutGeoipDatabaseRequest)) (*Response, error) // IngestPutGeoipDatabaseRequest configures the Ingest Put Geoip Database API request. diff --git a/esapi/api.ingest.put_ip_location_database.go b/esapi/api.ingest.put_ip_location_database.go new file mode 100644 index 0000000000..aa8b30487b --- /dev/null +++ b/esapi/api.ingest.put_ip_location_database.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newIngestPutIPLocationDatabaseFunc(t Transport) IngestPutIPLocationDatabase { + return func(id string, body io.Reader, o ...func(*IngestPutIPLocationDatabaseRequest)) (*Response, error) { + var r = IngestPutIPLocationDatabaseRequest{DocumentID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestPutIPLocationDatabase puts the configuration for a ip location database to be downloaded +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-ip-location-database-api.html. +type IngestPutIPLocationDatabase func(id string, body io.Reader, o ...func(*IngestPutIPLocationDatabaseRequest)) (*Response, error) + +// IngestPutIPLocationDatabaseRequest configures the Ingest PutIP Location Database API request. +type IngestPutIPLocationDatabaseRequest struct { + DocumentID string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IngestPutIPLocationDatabaseRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.put_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ingest") + 1 + len("ip_location") + 1 + len("database") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + 
instrument.BeforeRequest(req, "ingest.put_ip_location_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.put_ip_location_database", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.put_ip_location_database") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestPutIPLocationDatabase) WithContext(v context.Context) func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IngestPutIPLocationDatabase) WithPretty() func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestPutIPLocationDatabase) WithHuman() func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IngestPutIPLocationDatabase) WithErrorTrace() func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestPutIPLocationDatabase) WithFilterPath(v ...string) func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f IngestPutIPLocationDatabase) WithHeader(h map[string]string) func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestPutIPLocationDatabase) WithOpaqueID(s string) func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.ingest.put_pipeline.go b/esapi/api.ingest.put_pipeline.go index 833b2cb50b..3d64485889 100644 --- a/esapi/api.ingest.put_pipeline.go +++ b/esapi/api.ingest.put_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.ingest.simulate.go b/esapi/api.ingest.simulate.go index 1ca53dc154..ccf2b41bbc 100644 --- a/esapi/api.ingest.simulate.go +++ b/esapi/api.ingest.simulate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.knn_search.go b/esapi/api.knn_search.go index 71c2980cb8..4ac14bf10f 100644 --- a/esapi/api.knn_search.go +++ b/esapi/api.knn_search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.mget.go b/esapi/api.mget.go index 6a6990631a..c4b33140a2 100644 --- a/esapi/api.mget.go +++ b/esapi/api.mget.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.msearch.go b/esapi/api.msearch.go index 60308eb986..3801e1c838 100644 --- a/esapi/api.msearch.go +++ b/esapi/api.msearch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.msearch_template.go b/esapi/api.msearch_template.go index 5a5cdb765d..23a0ef4d49 100644 --- a/esapi/api.msearch_template.go +++ b/esapi/api.msearch_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.mtermvectors.go b/esapi/api.mtermvectors.go index 9bb3f2eb45..7b34fb05d0 100644 --- a/esapi/api.mtermvectors.go +++ b/esapi/api.mtermvectors.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.clear_repositories_metering_archive.go b/esapi/api.nodes.clear_repositories_metering_archive.go index f9e4551e7f..09adbb9b40 100644 --- a/esapi/api.nodes.clear_repositories_metering_archive.go +++ b/esapi/api.nodes.clear_repositories_metering_archive.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.get_repositories_metering_info.go b/esapi/api.nodes.get_repositories_metering_info.go index 2a6e7dc3d2..eeff2146c5 100644 --- a/esapi/api.nodes.get_repositories_metering_info.go +++ b/esapi/api.nodes.get_repositories_metering_info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.hot_threads.go b/esapi/api.nodes.hot_threads.go index d2092113df..701adff792 100644 --- a/esapi/api.nodes.hot_threads.go +++ b/esapi/api.nodes.hot_threads.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.info.go b/esapi/api.nodes.info.go index 0bf18f5e02..83d1c44b92 100644 --- a/esapi/api.nodes.info.go +++ b/esapi/api.nodes.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.reload_secure_settings.go b/esapi/api.nodes.reload_secure_settings.go index 591c966cfc..c78fa0bcfb 100644 --- a/esapi/api.nodes.reload_secure_settings.go +++ b/esapi/api.nodes.reload_secure_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.stats.go b/esapi/api.nodes.stats.go index a256c094a6..9699ecc55e 100644 --- a/esapi/api.nodes.stats.go +++ b/esapi/api.nodes.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.nodes.usage.go b/esapi/api.nodes.usage.go index 0a589f062d..ef953889c5 100644 --- a/esapi/api.nodes.usage.go +++ b/esapi/api.nodes.usage.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.ping.go b/esapi/api.ping.go index 1e4397a5a2..b800f3440d 100644 --- a/esapi/api.ping.go +++ b/esapi/api.ping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.profiling.stacktraces.go b/esapi/api.profiling.stacktraces.go index 9cdafeec3a..37473c88b9 100644 --- a/esapi/api.profiling.stacktraces.go +++ b/esapi/api.profiling.stacktraces.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.profiling.status.go b/esapi/api.profiling.status.go index 9441d9b872..0eda94de72 100644 --- a/esapi/api.profiling.status.go +++ b/esapi/api.profiling.status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.profiling.topn_functions.go b/esapi/api.profiling.topn_functions.go index 3287a1e994..dac2ced503 100644 --- a/esapi/api.profiling.topn_functions.go +++ b/esapi/api.profiling.topn_functions.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.put_script.go b/esapi/api.put_script.go index bc6ba719af..73d06b2de4 100644 --- a/esapi/api.put_script.go +++ b/esapi/api.put_script.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.delete_rule.go b/esapi/api.query_rules.delete_rule.go index ab1dc71fec..8ed1548bd2 100644 --- a/esapi/api.query_rules.delete_rule.go +++ b/esapi/api.query_rules.delete_rule.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.delete_ruleset.go b/esapi/api.query_rules.delete_ruleset.go index 45dd5843ee..e39b8d8574 100644 --- a/esapi/api.query_rules.delete_ruleset.go +++ b/esapi/api.query_rules.delete_ruleset.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.get_rule.go b/esapi/api.query_rules.get_rule.go index fb5518a615..6436522fc1 100644 --- a/esapi/api.query_rules.get_rule.go +++ b/esapi/api.query_rules.get_rule.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.get_ruleset.go b/esapi/api.query_rules.get_ruleset.go index 8ac5f6b6d2..7a18a690fe 100644 --- a/esapi/api.query_rules.get_ruleset.go +++ b/esapi/api.query_rules.get_ruleset.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.list_rulesets.go b/esapi/api.query_rules.list_rulesets.go index d21e4d6cae..ae6137dd1d 100644 --- a/esapi/api.query_rules.list_rulesets.go +++ b/esapi/api.query_rules.list_rulesets.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.put_rule.go b/esapi/api.query_rules.put_rule.go index 3492271068..626ea9bf2a 100644 --- a/esapi/api.query_rules.put_rule.go +++ b/esapi/api.query_rules.put_rule.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.put_ruleset.go b/esapi/api.query_rules.put_ruleset.go index a171e4cca7..10f873613e 100644 --- a/esapi/api.query_rules.put_ruleset.go +++ b/esapi/api.query_rules.put_ruleset.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.query_rules.test.go b/esapi/api.query_rules.test.go new file mode 100644 index 0000000000..4a0d85ff14 --- /dev/null +++ b/esapi/api.query_rules.test.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newQueryRulesTestFunc(t Transport) QueryRulesTest { + return func(body io.Reader, ruleset_id string, o ...func(*QueryRulesTestRequest)) (*Response, error) { + var r = QueryRulesTestRequest{Body: body, RulesetID: ruleset_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// QueryRulesTest tests a query ruleset to identify the rules that would match input criteria +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/test-query-ruleset.html. +type QueryRulesTest func(body io.Reader, ruleset_id string, o ...func(*QueryRulesTestRequest)) (*Response, error) + +// QueryRulesTestRequest configures the Query Rules Test API request. +type QueryRulesTestRequest struct { + Body io.Reader + + RulesetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r QueryRulesTestRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.test") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_query_rules") + 1 + len(r.RulesetID) + 1 + len("_test")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + path.WriteString(r.RulesetID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleset_id", r.RulesetID) + } + path.WriteString("/") + path.WriteString("_test") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.test") + if reader := instrument.RecordRequestBody(ctx, "query_rules.test", r.Body); 
reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.test") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f QueryRulesTest) WithContext(v context.Context) func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f QueryRulesTest) WithPretty() func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f QueryRulesTest) WithHuman() func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f QueryRulesTest) WithErrorTrace() func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f QueryRulesTest) WithFilterPath(v ...string) func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f QueryRulesTest) WithHeader(h map[string]string) func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f QueryRulesTest) WithOpaqueID(s string) func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.rank_eval.go b/esapi/api.rank_eval.go index 5aef194437..08ca055c04 100644 --- a/esapi/api.rank_eval.go +++ b/esapi/api.rank_eval.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.reindex.go b/esapi/api.reindex.go index 9a8e9d8eb5..98c5f18bb7 100644 --- a/esapi/api.reindex.go +++ b/esapi/api.reindex.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.reindex_rethrottle.go b/esapi/api.reindex_rethrottle.go index 7d36716df0..ef86f8bd53 100644 --- a/esapi/api.reindex_rethrottle.go +++ b/esapi/api.reindex_rethrottle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.render_search_template.go b/esapi/api.render_search_template.go index 0d6b0ae027..df7cc6a60f 100644 --- a/esapi/api.render_search_template.go +++ b/esapi/api.render_search_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.scripts_painless_execute.go b/esapi/api.scripts_painless_execute.go index bef9a22ffb..5bbec2d830 100644 --- a/esapi/api.scripts_painless_execute.go +++ b/esapi/api.scripts_painless_execute.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.scroll.go b/esapi/api.scroll.go index 52cc8aca0d..308d52bc7f 100644 --- a/esapi/api.scroll.go +++ b/esapi/api.scroll.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search.go b/esapi/api.search.go index 3c93cd5e58..68809fc9dd 100644 --- a/esapi/api.search.go +++ b/esapi/api.search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.delete.go b/esapi/api.search_application.delete.go index a36a2cd170..e94c3ce266 100644 --- a/esapi/api.search_application.delete.go +++ b/esapi/api.search_application.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.delete_behavioral_analytics.go b/esapi/api.search_application.delete_behavioral_analytics.go index 61a8c9055b..a4924d9d34 100644 --- a/esapi/api.search_application.delete_behavioral_analytics.go +++ b/esapi/api.search_application.delete_behavioral_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.get.go b/esapi/api.search_application.get.go index 478c96e5f0..3e61496563 100644 --- a/esapi/api.search_application.get.go +++ b/esapi/api.search_application.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.get_behavioral_analytics.go b/esapi/api.search_application.get_behavioral_analytics.go index da881ea819..5e3c3b851f 100644 --- a/esapi/api.search_application.get_behavioral_analytics.go +++ b/esapi/api.search_application.get_behavioral_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.list.go b/esapi/api.search_application.list.go index 8caa449804..2216b2d0d0 100644 --- a/esapi/api.search_application.list.go +++ b/esapi/api.search_application.list.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.post_behavioral_analytics_event.go b/esapi/api.search_application.post_behavioral_analytics_event.go index 1f58229eab..918b18008d 100644 --- a/esapi/api.search_application.post_behavioral_analytics_event.go +++ b/esapi/api.search_application.post_behavioral_analytics_event.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.put.go b/esapi/api.search_application.put.go index 1f100d03a8..2be4883a9e 100644 --- a/esapi/api.search_application.put.go +++ b/esapi/api.search_application.put.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.put_behavioral_analytics.go b/esapi/api.search_application.put_behavioral_analytics.go index 9b7ff92754..a8436aa431 100644 --- a/esapi/api.search_application.put_behavioral_analytics.go +++ b/esapi/api.search_application.put_behavioral_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.render_query.go b/esapi/api.search_application.render_query.go index 817eca1b16..d6dc130fe8 100644 --- a/esapi/api.search_application.render_query.go +++ b/esapi/api.search_application.render_query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_application.search.go b/esapi/api.search_application.search.go index 4b19fa834c..bc42d58e0e 100644 --- a/esapi/api.search_application.search.go +++ b/esapi/api.search_application.search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_mvt.go b/esapi/api.search_mvt.go index 055fe7ba9b..ef7280d2b4 100644 --- a/esapi/api.search_mvt.go +++ b/esapi/api.search_mvt.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_shards.go b/esapi/api.search_shards.go index 8e2dd573ff..29e79b1a7f 100644 --- a/esapi/api.search_shards.go +++ b/esapi/api.search_shards.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.search_template.go b/esapi/api.search_template.go index b2beeaa0f1..d30ef4f937 100644 --- a/esapi/api.search_template.go +++ b/esapi/api.search_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.shutdown.delete_node.go b/esapi/api.shutdown.delete_node.go index 4e7306a09c..16d8759926 100644 --- a/esapi/api.shutdown.delete_node.go +++ b/esapi/api.shutdown.delete_node.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newShutdownDeleteNodeFunc(t Transport) ShutdownDeleteNode { @@ -51,6 +52,9 @@ type ShutdownDeleteNode func(node_id string, o ...func(*ShutdownDeleteNodeReques type ShutdownDeleteNodeRequest struct { NodeID string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -96,6 +100,14 @@ func (r ShutdownDeleteNodeRequest) Do(providedCtx context.Context, transport Tra params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -174,6 +186,20 @@ func (f ShutdownDeleteNode) WithContext(v context.Context) func(*ShutdownDeleteN } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ShutdownDeleteNode) WithMasterTimeout(v time.Duration) func(*ShutdownDeleteNodeRequest) { + return func(r *ShutdownDeleteNodeRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ShutdownDeleteNode) WithTimeout(v time.Duration) func(*ShutdownDeleteNodeRequest) { + return func(r *ShutdownDeleteNodeRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f ShutdownDeleteNode) WithPretty() func(*ShutdownDeleteNodeRequest) { return func(r *ShutdownDeleteNodeRequest) { diff --git a/esapi/api.shutdown.get_node.go b/esapi/api.shutdown.get_node.go index 10706afac3..dc8f969557 100644 --- a/esapi/api.shutdown.get_node.go +++ b/esapi/api.shutdown.get_node.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.shutdown.put_node.go b/esapi/api.shutdown.put_node.go index 779427b6d6..8980da6b14 100644 --- a/esapi/api.shutdown.put_node.go +++ b/esapi/api.shutdown.put_node.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "io" "net/http" "strings" + "time" ) func newShutdownPutNodeFunc(t Transport) ShutdownPutNode { @@ -54,6 +55,9 @@ type ShutdownPutNodeRequest struct { NodeID string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -99,6 +103,14 @@ func (r ShutdownPutNodeRequest) Do(providedCtx context.Context, transport Transp params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -184,6 +196,20 @@ func (f ShutdownPutNode) WithContext(v context.Context) func(*ShutdownPutNodeReq } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ShutdownPutNode) WithMasterTimeout(v time.Duration) func(*ShutdownPutNodeRequest) { + return func(r *ShutdownPutNodeRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ShutdownPutNode) WithTimeout(v time.Duration) func(*ShutdownPutNodeRequest) { + return func(r *ShutdownPutNodeRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. 
func (f ShutdownPutNode) WithPretty() func(*ShutdownPutNodeRequest) { return func(r *ShutdownPutNodeRequest) { diff --git a/esapi/api.simulate.ingest.go b/esapi/api.simulate.ingest.go index 47c58fc696..bf636e56fa 100644 --- a/esapi/api.simulate.ingest.go +++ b/esapi/api.simulate.ingest.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.cleanup_repository.go b/esapi/api.snapshot.cleanup_repository.go index fe72c15be2..40387ba755 100644 --- a/esapi/api.snapshot.cleanup_repository.go +++ b/esapi/api.snapshot.cleanup_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.clone.go b/esapi/api.snapshot.clone.go index 8526d2e663..6461efd713 100644 --- a/esapi/api.snapshot.clone.go +++ b/esapi/api.snapshot.clone.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.create.go b/esapi/api.snapshot.create.go index 4f316f5030..45a6f9eb92 100644 --- a/esapi/api.snapshot.create.go +++ b/esapi/api.snapshot.create.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.create_repository.go b/esapi/api.snapshot.create_repository.go index 7506d05f29..625fbf2661 100644 --- a/esapi/api.snapshot.create_repository.go +++ b/esapi/api.snapshot.create_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.delete.go b/esapi/api.snapshot.delete.go index 0beff6ce2e..6df6de0108 100644 --- a/esapi/api.snapshot.delete.go +++ b/esapi/api.snapshot.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.delete_repository.go b/esapi/api.snapshot.delete_repository.go index 46e262e8f6..f99d421e40 100644 --- a/esapi/api.snapshot.delete_repository.go +++ b/esapi/api.snapshot.delete_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.get.go b/esapi/api.snapshot.get.go index a5a1d42656..9cd7b0bc4e 100644 --- a/esapi/api.snapshot.get.go +++ b/esapi/api.snapshot.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.get_repository.go b/esapi/api.snapshot.get_repository.go index f52cf023b9..a5c134bbed 100644 --- a/esapi/api.snapshot.get_repository.go +++ b/esapi/api.snapshot.get_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.repository_analyze.go b/esapi/api.snapshot.repository_analyze.go index cc958fe3a8..4a69f6245e 100644 --- a/esapi/api.snapshot.repository_analyze.go +++ b/esapi/api.snapshot.repository_analyze.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.repository_verify_integrity.go b/esapi/api.snapshot.repository_verify_integrity.go new file mode 100644 index 0000000000..6d13948185 --- /dev/null +++ b/esapi/api.snapshot.repository_verify_integrity.go @@ -0,0 +1,325 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newSnapshotRepositoryVerifyIntegrityFunc(t Transport) SnapshotRepositoryVerifyIntegrity { + return func(repository string, o ...func(*SnapshotRepositoryVerifyIntegrityRequest)) (*Response, error) { + var r = SnapshotRepositoryVerifyIntegrityRequest{Repository: repository} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotRepositoryVerifyIntegrity verifies the integrity of the contents of a snapshot repository +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. +type SnapshotRepositoryVerifyIntegrity func(repository string, o ...func(*SnapshotRepositoryVerifyIntegrityRequest)) (*Response, error) + +// SnapshotRepositoryVerifyIntegrityRequest configures the Snapshot Repository Verify Integrity API request. 
+type SnapshotRepositoryVerifyIntegrityRequest struct { + Repository string + + BlobThreadPoolConcurrency *int + IndexSnapshotVerificationConcurrency *int + IndexVerificationConcurrency *int + MaxBytesPerSec string + MaxFailedShardSnapshots *int + MetaThreadPoolConcurrency *int + SnapshotVerificationConcurrency *int + VerifyBlobContents *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SnapshotRepositoryVerifyIntegrityRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.repository_verify_integrity") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(r.Repository) + 1 + len("_verify_integrity")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString(r.Repository) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.Repository) + } + path.WriteString("/") + path.WriteString("_verify_integrity") + + params = make(map[string]string) + + if r.BlobThreadPoolConcurrency != nil { + params["blob_thread_pool_concurrency"] = strconv.FormatInt(int64(*r.BlobThreadPoolConcurrency), 10) + } + + if r.IndexSnapshotVerificationConcurrency != nil { + params["index_snapshot_verification_concurrency"] = strconv.FormatInt(int64(*r.IndexSnapshotVerificationConcurrency), 10) + } + + if r.IndexVerificationConcurrency != nil { + params["index_verification_concurrency"] = strconv.FormatInt(int64(*r.IndexVerificationConcurrency), 10) + } + + if r.MaxBytesPerSec != 
"" { + params["max_bytes_per_sec"] = r.MaxBytesPerSec + } + + if r.MaxFailedShardSnapshots != nil { + params["max_failed_shard_snapshots"] = strconv.FormatInt(int64(*r.MaxFailedShardSnapshots), 10) + } + + if r.MetaThreadPoolConcurrency != nil { + params["meta_thread_pool_concurrency"] = strconv.FormatInt(int64(*r.MetaThreadPoolConcurrency), 10) + } + + if r.SnapshotVerificationConcurrency != nil { + params["snapshot_verification_concurrency"] = strconv.FormatInt(int64(*r.SnapshotVerificationConcurrency), 10) + } + + if r.VerifyBlobContents != nil { + params["verify_blob_contents"] = strconv.FormatBool(*r.VerifyBlobContents) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.repository_verify_integrity") + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.repository_verify_integrity") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + 
Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SnapshotRepositoryVerifyIntegrity) WithContext(v context.Context) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.ctx = v + } +} + +// WithBlobThreadPoolConcurrency - number of threads to use for reading blob contents. +func (f SnapshotRepositoryVerifyIntegrity) WithBlobThreadPoolConcurrency(v int) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.BlobThreadPoolConcurrency = &v + } +} + +// WithIndexSnapshotVerificationConcurrency - number of snapshots to verify concurrently within each index. +func (f SnapshotRepositoryVerifyIntegrity) WithIndexSnapshotVerificationConcurrency(v int) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.IndexSnapshotVerificationConcurrency = &v + } +} + +// WithIndexVerificationConcurrency - number of indices to verify concurrently. +func (f SnapshotRepositoryVerifyIntegrity) WithIndexVerificationConcurrency(v int) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.IndexVerificationConcurrency = &v + } +} + +// WithMaxBytesPerSec - rate limit for individual blob verification. +func (f SnapshotRepositoryVerifyIntegrity) WithMaxBytesPerSec(v string) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.MaxBytesPerSec = v + } +} + +// WithMaxFailedShardSnapshots - maximum permitted number of failed shard snapshots. 
+func (f SnapshotRepositoryVerifyIntegrity) WithMaxFailedShardSnapshots(v int) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.MaxFailedShardSnapshots = &v + } +} + +// WithMetaThreadPoolConcurrency - number of threads to use for reading metadata. +func (f SnapshotRepositoryVerifyIntegrity) WithMetaThreadPoolConcurrency(v int) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.MetaThreadPoolConcurrency = &v + } +} + +// WithSnapshotVerificationConcurrency - number of snapshots to verify concurrently. +func (f SnapshotRepositoryVerifyIntegrity) WithSnapshotVerificationConcurrency(v int) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.SnapshotVerificationConcurrency = &v + } +} + +// WithVerifyBlobContents - whether to verify the contents of individual blobs. +func (f SnapshotRepositoryVerifyIntegrity) WithVerifyBlobContents(v bool) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.VerifyBlobContents = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotRepositoryVerifyIntegrity) WithPretty() func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SnapshotRepositoryVerifyIntegrity) WithHuman() func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f SnapshotRepositoryVerifyIntegrity) WithErrorTrace() func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotRepositoryVerifyIntegrity) WithFilterPath(v ...string) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotRepositoryVerifyIntegrity) WithHeader(h map[string]string) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotRepositoryVerifyIntegrity) WithOpaqueID(s string) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.snapshot.restore.go b/esapi/api.snapshot.restore.go index ca4234a819..b2526ecb94 100644 --- a/esapi/api.snapshot.restore.go +++ b/esapi/api.snapshot.restore.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.status.go b/esapi/api.snapshot.status.go index fcee081d9d..a96936eaf6 100644 --- a/esapi/api.snapshot.status.go +++ b/esapi/api.snapshot.status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.snapshot.verify_repository.go b/esapi/api.snapshot.verify_repository.go index c0919673b0..870917d203 100644 --- a/esapi/api.snapshot.verify_repository.go +++ b/esapi/api.snapshot.verify_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.delete_synonym.go b/esapi/api.synonyms.delete_synonym.go index 6f2eb72d4d..ce998b4956 100644 --- a/esapi/api.synonyms.delete_synonym.go +++ b/esapi/api.synonyms.delete_synonym.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.delete_synonym_rule.go b/esapi/api.synonyms.delete_synonym_rule.go index 390122712b..2e418287fd 100644 --- a/esapi/api.synonyms.delete_synonym_rule.go +++ b/esapi/api.synonyms.delete_synonym_rule.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.get_synonym.go b/esapi/api.synonyms.get_synonym.go index 05f4fec4bc..b31250bf72 100644 --- a/esapi/api.synonyms.get_synonym.go +++ b/esapi/api.synonyms.get_synonym.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.get_synonym_rule.go b/esapi/api.synonyms.get_synonym_rule.go index c9225d981e..0c2963e0e0 100644 --- a/esapi/api.synonyms.get_synonym_rule.go +++ b/esapi/api.synonyms.get_synonym_rule.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.get_synonyms_sets.go b/esapi/api.synonyms.get_synonyms_sets.go index f0b21a02f1..19381e9947 100644 --- a/esapi/api.synonyms.get_synonyms_sets.go +++ b/esapi/api.synonyms.get_synonyms_sets.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.put_synonym.go b/esapi/api.synonyms.put_synonym.go index 4ec9236325..d4d2a8308c 100644 --- a/esapi/api.synonyms.put_synonym.go +++ b/esapi/api.synonyms.put_synonym.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.synonyms.put_synonym_rule.go b/esapi/api.synonyms.put_synonym_rule.go index 6645dc37a9..6c6af9bd4d 100644 --- a/esapi/api.synonyms.put_synonym_rule.go +++ b/esapi/api.synonyms.put_synonym_rule.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.tasks.cancel.go b/esapi/api.tasks.cancel.go index 0af16f51cc..9c2e48c0de 100644 --- a/esapi/api.tasks.cancel.go +++ b/esapi/api.tasks.cancel.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.tasks.get.go b/esapi/api.tasks.get.go index a32f48ca4f..ce695599e7 100644 --- a/esapi/api.tasks.get.go +++ b/esapi/api.tasks.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.tasks.list.go b/esapi/api.tasks.list.go index 944bdd752d..91fe0e9e03 100644 --- a/esapi/api.tasks.list.go +++ b/esapi/api.tasks.list.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.terms_enum.go b/esapi/api.terms_enum.go index f244464c90..8a59ab1bfd 100644 --- a/esapi/api.terms_enum.go +++ b/esapi/api.terms_enum.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.termvectors.go b/esapi/api.termvectors.go index 1ff91335f9..ea58f97d54 100644 --- a/esapi/api.termvectors.go +++ b/esapi/api.termvectors.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.update.go b/esapi/api.update.go index 2ace2ab3de..a03f118f9e 100644 --- a/esapi/api.update.go +++ b/esapi/api.update.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -57,18 +57,19 @@ type UpdateRequest struct { Body io.Reader - IfPrimaryTerm *int - IfSeqNo *int - Lang string - Refresh string - RequireAlias *bool - RetryOnConflict *int - Routing string - Source []string - SourceExcludes []string - SourceIncludes []string - Timeout time.Duration - WaitForActiveShards string + IfPrimaryTerm *int + IfSeqNo *int + IncludeSourceOnError *bool + Lang string + Refresh string + RequireAlias *bool + RetryOnConflict *int + Routing string + Source []string + SourceExcludes []string + SourceIncludes []string + Timeout time.Duration + WaitForActiveShards string Pretty bool Human bool @@ -126,6 +127,10 @@ func (r UpdateRequest) Do(providedCtx context.Context, transport Transport) (*Re params["if_seq_no"] = strconv.FormatInt(int64(*r.IfSeqNo), 10) } + if r.IncludeSourceOnError != nil { + params["include_source_on_error"] = strconv.FormatBool(*r.IncludeSourceOnError) + } + if r.Lang != "" { params["lang"] = r.Lang } @@ -265,6 +270,13 @@ func (f Update) WithIfSeqNo(v int) func(*UpdateRequest) { } } +// 
WithIncludeSourceOnError - true or false if to include the document source in the error message in case of parsing errors. defaults to true.. +func (f Update) WithIncludeSourceOnError(v bool) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.IncludeSourceOnError = &v + } +} + // WithLang - the script language (default: painless). func (f Update) WithLang(v string) func(*UpdateRequest) { return func(r *UpdateRequest) { diff --git a/esapi/api.update_by_query.go b/esapi/api.update_by_query.go index 204e8f988e..3cb6ee8f60 100644 --- a/esapi/api.update_by_query.go +++ b/esapi/api.update_by_query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.update_by_query_rethrottle.go b/esapi/api.update_by_query_rethrottle.go index bd2a4e808c..39ad1c19cd 100644 --- a/esapi/api.update_by_query_rethrottle.go +++ b/esapi/api.update_by_query_rethrottle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.async_search.delete.go b/esapi/api.xpack.async_search.delete.go index 13b0b4ad04..5b0e9df0bb 100644 --- a/esapi/api.xpack.async_search.delete.go +++ b/esapi/api.xpack.async_search.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.async_search.get.go b/esapi/api.xpack.async_search.get.go index 20d89af457..3b0874a26a 100644 --- a/esapi/api.xpack.async_search.get.go +++ b/esapi/api.xpack.async_search.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.async_search.status.go b/esapi/api.xpack.async_search.status.go index 42c903652f..cd7f9c985f 100644 --- a/esapi/api.xpack.async_search.status.go +++ b/esapi/api.xpack.async_search.status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.async_search.submit.go b/esapi/api.xpack.async_search.submit.go index c04014c0f8..266aa18ae4 100644 --- a/esapi/api.xpack.async_search.submit.go +++ b/esapi/api.xpack.async_search.submit.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -62,6 +62,7 @@ type AsyncSearchSubmitRequest struct { Analyzer string AnalyzeWildcard *bool BatchedReduceSize *int + CcsMinimizeRoundtrips *bool DefaultOperator string Df string DocvalueFields []string @@ -77,6 +78,7 @@ type AsyncSearchSubmitRequest struct { Preference string Query string RequestCache *bool + RestTotalHitsAsInt *bool Routing []string SearchType string SeqNoPrimaryTerm *bool @@ -164,6 +166,10 @@ func (r AsyncSearchSubmitRequest) Do(providedCtx context.Context, transport Tran params["batched_reduce_size"] = strconv.FormatInt(int64(*r.BatchedReduceSize), 10) } + if r.CcsMinimizeRoundtrips != nil { + params["ccs_minimize_roundtrips"] = strconv.FormatBool(*r.CcsMinimizeRoundtrips) + } + if r.DefaultOperator != "" { params["default_operator"] = r.DefaultOperator } @@ -224,6 +230,10 @@ func (r AsyncSearchSubmitRequest) Do(providedCtx context.Context, transport Tran params["request_cache"] = strconv.FormatBool(*r.RequestCache) } + if r.RestTotalHitsAsInt != nil { + params["rest_total_hits_as_int"] = strconv.FormatBool(*r.RestTotalHitsAsInt) + } + if len(r.Routing) > 0 { params["routing"] = strings.Join(r.Routing, ",") } @@ -442,6 +452,13 @@ func (f AsyncSearchSubmit) WithBatchedReduceSize(v int) func(*AsyncSearchSubmitR } } +// WithCcsMinimizeRoundtrips - when doing a cross-cluster search, setting it to true may improve overall search latency, particularly when searching clusters with a large number of shards. however, when set to true, the progress of searches on the remote clusters will not be received until the search finishes on all clusters.. +func (f AsyncSearchSubmit) WithCcsMinimizeRoundtrips(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.CcsMinimizeRoundtrips = &v + } +} + // WithDefaultOperator - the default operator for query string query (and or or). 
func (f AsyncSearchSubmit) WithDefaultOperator(v string) func(*AsyncSearchSubmitRequest) { return func(r *AsyncSearchSubmitRequest) { @@ -547,6 +564,13 @@ func (f AsyncSearchSubmit) WithRequestCache(v bool) func(*AsyncSearchSubmitReque } } +// WithRestTotalHitsAsInt - indicates whether hits.total should be rendered as an integer or an object in the rest search response. +func (f AsyncSearchSubmit) WithRestTotalHitsAsInt(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.RestTotalHitsAsInt = &v + } +} + // WithRouting - a list of specific routing values. func (f AsyncSearchSubmit) WithRouting(v ...string) func(*AsyncSearchSubmitRequest) { return func(r *AsyncSearchSubmitRequest) { diff --git a/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go b/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go index a1045820ce..d090c71475 100644 --- a/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go +++ b/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go b/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go index 0ed535ad36..b34426b6e9 100644 --- a/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go +++ b/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.autoscaling.get_autoscaling_policy.go b/esapi/api.xpack.autoscaling.get_autoscaling_policy.go index 050f7ec064..b49a522939 100644 --- a/esapi/api.xpack.autoscaling.get_autoscaling_policy.go +++ b/esapi/api.xpack.autoscaling.get_autoscaling_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.autoscaling.put_autoscaling_policy.go b/esapi/api.xpack.autoscaling.put_autoscaling_policy.go index e16d51dcab..9f7a87510b 100644 --- a/esapi/api.xpack.autoscaling.put_autoscaling_policy.go +++ b/esapi/api.xpack.autoscaling.put_autoscaling_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.cat.ml_data_frame_analytics.go b/esapi/api.xpack.cat.ml_data_frame_analytics.go index 4de4da7d2c..04edca96ac 100644 --- a/esapi/api.xpack.cat.ml_data_frame_analytics.go +++ b/esapi/api.xpack.cat.ml_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.cat.ml_datafeeds.go b/esapi/api.xpack.cat.ml_datafeeds.go index 42fa88dee0..98d9b735cb 100644 --- a/esapi/api.xpack.cat.ml_datafeeds.go +++ b/esapi/api.xpack.cat.ml_datafeeds.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.cat.ml_jobs.go b/esapi/api.xpack.cat.ml_jobs.go index 4806a3b0f4..66e16f34ac 100644 --- a/esapi/api.xpack.cat.ml_jobs.go +++ b/esapi/api.xpack.cat.ml_jobs.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.cat.ml_trained_models.go b/esapi/api.xpack.cat.ml_trained_models.go index b540c68315..c32f0fda24 100644 --- a/esapi/api.xpack.cat.ml_trained_models.go +++ b/esapi/api.xpack.cat.ml_trained_models.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.cat.transforms.go b/esapi/api.xpack.cat.transforms.go index cf0ce6d972..56b91dfd97 100644 --- a/esapi/api.xpack.cat.transforms.go +++ b/esapi/api.xpack.cat.transforms.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.delete_auto_follow_pattern.go b/esapi/api.xpack.ccr.delete_auto_follow_pattern.go index aeebabaea0..0fbc5bdeb3 100644 --- a/esapi/api.xpack.ccr.delete_auto_follow_pattern.go +++ b/esapi/api.xpack.ccr.delete_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.follow.go b/esapi/api.xpack.ccr.follow.go index 5ea0cae439..9c7b31702b 100644 --- a/esapi/api.xpack.ccr.follow.go +++ b/esapi/api.xpack.ccr.follow.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.follow_info.go b/esapi/api.xpack.ccr.follow_info.go index 6e281fea18..d616888436 100644 --- a/esapi/api.xpack.ccr.follow_info.go +++ b/esapi/api.xpack.ccr.follow_info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.follow_stats.go b/esapi/api.xpack.ccr.follow_stats.go index 17d20f6a9e..a582d87ec6 100644 --- a/esapi/api.xpack.ccr.follow_stats.go +++ b/esapi/api.xpack.ccr.follow_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.forget_follower.go b/esapi/api.xpack.ccr.forget_follower.go index 13ae28fb8e..4b8c57ba71 100644 --- a/esapi/api.xpack.ccr.forget_follower.go +++ b/esapi/api.xpack.ccr.forget_follower.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.get_auto_follow_pattern.go b/esapi/api.xpack.ccr.get_auto_follow_pattern.go index 9d0e575a5e..cac4bcaf37 100644 --- a/esapi/api.xpack.ccr.get_auto_follow_pattern.go +++ b/esapi/api.xpack.ccr.get_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.pause_auto_follow_pattern.go b/esapi/api.xpack.ccr.pause_auto_follow_pattern.go index 2874991d9c..82f13bdc2e 100644 --- a/esapi/api.xpack.ccr.pause_auto_follow_pattern.go +++ b/esapi/api.xpack.ccr.pause_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.pause_follow.go b/esapi/api.xpack.ccr.pause_follow.go index bf2018ea40..1a13668763 100644 --- a/esapi/api.xpack.ccr.pause_follow.go +++ b/esapi/api.xpack.ccr.pause_follow.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.put_auto_follow_pattern.go b/esapi/api.xpack.ccr.put_auto_follow_pattern.go index aaaf1eb0ae..10cd3b7f35 100644 --- a/esapi/api.xpack.ccr.put_auto_follow_pattern.go +++ b/esapi/api.xpack.ccr.put_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.resume_auto_follow_pattern.go b/esapi/api.xpack.ccr.resume_auto_follow_pattern.go index b00bd3da24..e544ac6e29 100644 --- a/esapi/api.xpack.ccr.resume_auto_follow_pattern.go +++ b/esapi/api.xpack.ccr.resume_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.resume_follow.go b/esapi/api.xpack.ccr.resume_follow.go index 5daa658bee..5f697c4773 100644 --- a/esapi/api.xpack.ccr.resume_follow.go +++ b/esapi/api.xpack.ccr.resume_follow.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.stats.go b/esapi/api.xpack.ccr.stats.go index f64c68f070..222a399060 100644 --- a/esapi/api.xpack.ccr.stats.go +++ b/esapi/api.xpack.ccr.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ccr.unfollow.go b/esapi/api.xpack.ccr.unfollow.go index 10b44195bd..65d2929676 100644 --- a/esapi/api.xpack.ccr.unfollow.go +++ b/esapi/api.xpack.ccr.unfollow.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.close_point_in_time.go b/esapi/api.xpack.close_point_in_time.go index 7946091da1..df6b172666 100644 --- a/esapi/api.xpack.close_point_in_time.go +++ b/esapi/api.xpack.close_point_in_time.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.enrich.delete_policy.go b/esapi/api.xpack.enrich.delete_policy.go index fd28aa72e0..dc106677e9 100644 --- a/esapi/api.xpack.enrich.delete_policy.go +++ b/esapi/api.xpack.enrich.delete_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.enrich.execute_policy.go b/esapi/api.xpack.enrich.execute_policy.go index 946e53fb7c..0d79871c1e 100644 --- a/esapi/api.xpack.enrich.execute_policy.go +++ b/esapi/api.xpack.enrich.execute_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.enrich.get_policy.go b/esapi/api.xpack.enrich.get_policy.go index f19dba4982..b81054cfab 100644 --- a/esapi/api.xpack.enrich.get_policy.go +++ b/esapi/api.xpack.enrich.get_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.enrich.put_policy.go b/esapi/api.xpack.enrich.put_policy.go index 4003185845..adc6320828 100644 --- a/esapi/api.xpack.enrich.put_policy.go +++ b/esapi/api.xpack.enrich.put_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.enrich.stats.go b/esapi/api.xpack.enrich.stats.go index fa1476a17c..aac4e4d511 100644 --- a/esapi/api.xpack.enrich.stats.go +++ b/esapi/api.xpack.enrich.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.eql.delete.go b/esapi/api.xpack.eql.delete.go index 1d56a0fe98..8b1cb0e675 100644 --- a/esapi/api.xpack.eql.delete.go +++ b/esapi/api.xpack.eql.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.eql.get.go b/esapi/api.xpack.eql.get.go index 983bbe6162..5527aeeaa2 100644 --- a/esapi/api.xpack.eql.get.go +++ b/esapi/api.xpack.eql.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.eql.get_status.go b/esapi/api.xpack.eql.get_status.go index 6263993aa7..8c0dc3cdd3 100644 --- a/esapi/api.xpack.eql.get_status.go +++ b/esapi/api.xpack.eql.get_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.eql.search.go b/esapi/api.xpack.eql.search.go index 4637a11cbd..28ef4f4d01 100644 --- a/esapi/api.xpack.eql.search.go +++ b/esapi/api.xpack.eql.search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -56,9 +56,11 @@ type EqlSearchRequest struct { Body io.Reader - KeepAlive time.Duration - KeepOnCompletion *bool - WaitForCompletionTimeout time.Duration + AllowPartialSearchResults *bool + AllowPartialSequenceResults *bool + KeepAlive time.Duration + KeepOnCompletion *bool + WaitForCompletionTimeout time.Duration Pretty bool Human bool @@ -105,6 +107,14 @@ func (r EqlSearchRequest) Do(providedCtx context.Context, transport Transport) ( params = make(map[string]string) + if r.AllowPartialSearchResults != nil { + params["allow_partial_search_results"] = strconv.FormatBool(*r.AllowPartialSearchResults) + } + + if r.AllowPartialSequenceResults != nil { + params["allow_partial_sequence_results"] = strconv.FormatBool(*r.AllowPartialSequenceResults) + } + if r.KeepAlive != 0 { params["keep_alive"] = formatDuration(r.KeepAlive) } @@ -202,6 +212,20 @@ func (f EqlSearch) WithContext(v context.Context) func(*EqlSearchRequest) { } } +// WithAllowPartialSearchResults - control whether the query should keep running in case of shard failures, and return partial results. +func (f EqlSearch) WithAllowPartialSearchResults(v bool) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.AllowPartialSearchResults = &v + } +} + +// WithAllowPartialSequenceResults - control whether a sequence query should return partial results or no results at all in case of shard failures. this option has effect only if [allow_partial_search_results] is true.. +func (f EqlSearch) WithAllowPartialSequenceResults(v bool) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.AllowPartialSequenceResults = &v + } +} + // WithKeepAlive - update the time interval in which the results (partial or final) for this search will be available. 
func (f EqlSearch) WithKeepAlive(v time.Duration) func(*EqlSearchRequest) { return func(r *EqlSearchRequest) { diff --git a/esapi/api.xpack.esql.async_query.go b/esapi/api.xpack.esql.async_query.go index 3a61e36a36..56b49b7811 100644 --- a/esapi/api.xpack.esql.async_query.go +++ b/esapi/api.xpack.esql.async_query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.esql.async_query_delete.go b/esapi/api.xpack.esql.async_query_delete.go new file mode 100644 index 0000000000..dd22a5a5ab --- /dev/null +++ b/esapi/api.xpack.esql.async_query_delete.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newEsqlAsyncQueryDeleteFunc(t Transport) EsqlAsyncQueryDelete { + return func(id string, o ...func(*EsqlAsyncQueryDeleteRequest)) (*Response, error) { + var r = EsqlAsyncQueryDeleteRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EsqlAsyncQueryDelete - Delete an async query request given its ID. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-delete-api.html. +type EsqlAsyncQueryDelete func(id string, o ...func(*EsqlAsyncQueryDeleteRequest)) (*Response, error) + +// EsqlAsyncQueryDeleteRequest configures the Esql Async Query Delete API request. +type EsqlAsyncQueryDeleteRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r EsqlAsyncQueryDeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_query") + 1 + len("async") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_delete") + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_delete") + } + if err != nil { + 
if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EsqlAsyncQueryDelete) WithContext(v context.Context) func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EsqlAsyncQueryDelete) WithPretty() func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EsqlAsyncQueryDelete) WithHuman() func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EsqlAsyncQueryDelete) WithErrorTrace() func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EsqlAsyncQueryDelete) WithFilterPath(v ...string) func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EsqlAsyncQueryDelete) WithHeader(h map[string]string) func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f EsqlAsyncQueryDelete) WithOpaqueID(s string) func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.xpack.esql.async_query_get.go b/esapi/api.xpack.esql.async_query_get.go index b2f644c9bc..6040f8e8b7 100644 --- a/esapi/api.xpack.esql.async_query_get.go +++ b/esapi/api.xpack.esql.async_query_get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.esql.async_query_stop.go b/esapi/api.xpack.esql.async_query_stop.go new file mode 100644 index 0000000000..bd4106da15 --- /dev/null +++ b/esapi/api.xpack.esql.async_query_stop.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newEsqlAsyncQueryStopFunc(t Transport) EsqlAsyncQueryStop { + return func(id string, o ...func(*EsqlAsyncQueryStopRequest)) (*Response, error) { + var r = EsqlAsyncQueryStopRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EsqlAsyncQueryStop - Stops a previously submitted async query request given its ID and collects the results. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-stop-api.html. +type EsqlAsyncQueryStop func(id string, o ...func(*EsqlAsyncQueryStopRequest)) (*Response, error) + +// EsqlAsyncQueryStopRequest configures the Esql Async Query Stop API request. +type EsqlAsyncQueryStopRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r EsqlAsyncQueryStopRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_query") + 1 + len("async") + 1 + len(r.DocumentID) + 1 + len("stop")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + path.WriteString("/") + path.WriteString("stop") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_stop") + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, 
"elasticsearch", "esql.async_query_stop") + } + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EsqlAsyncQueryStop) WithContext(v context.Context) func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EsqlAsyncQueryStop) WithPretty() func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EsqlAsyncQueryStop) WithHuman() func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EsqlAsyncQueryStop) WithErrorTrace() func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EsqlAsyncQueryStop) WithFilterPath(v ...string) func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EsqlAsyncQueryStop) WithHeader(h map[string]string) func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f EsqlAsyncQueryStop) WithOpaqueID(s string) func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.xpack.esql.query.go b/esapi/api.xpack.esql.query.go index cc4b59bfe4..9ecf786ac2 100644 --- a/esapi/api.xpack.esql.query.go +++ b/esapi/api.xpack.esql.query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.graph.explore.go b/esapi/api.xpack.graph.explore.go index 6300452260..029d206e85 100644 --- a/esapi/api.xpack.graph.explore.go +++ b/esapi/api.xpack.graph.explore.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.delete_lifecycle.go b/esapi/api.xpack.ilm.delete_lifecycle.go index 5c99d4fe6b..b3dde24651 100644 --- a/esapi/api.xpack.ilm.delete_lifecycle.go +++ b/esapi/api.xpack.ilm.delete_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.explain_lifecycle.go b/esapi/api.xpack.ilm.explain_lifecycle.go index 372f9d9dd8..ec08553c74 100644 --- a/esapi/api.xpack.ilm.explain_lifecycle.go +++ b/esapi/api.xpack.ilm.explain_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.get_lifecycle.go b/esapi/api.xpack.ilm.get_lifecycle.go index df4bafa5c1..c6fcc23b92 100644 --- a/esapi/api.xpack.ilm.get_lifecycle.go +++ b/esapi/api.xpack.ilm.get_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.get_status.go b/esapi/api.xpack.ilm.get_status.go index 507b66f38a..532f29d95a 100644 --- a/esapi/api.xpack.ilm.get_status.go +++ b/esapi/api.xpack.ilm.get_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.migrate_to_data_tiers.go b/esapi/api.xpack.ilm.migrate_to_data_tiers.go index 7d4fae2f46..66ffebf1dc 100644 --- a/esapi/api.xpack.ilm.migrate_to_data_tiers.go +++ b/esapi/api.xpack.ilm.migrate_to_data_tiers.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.move_to_step.go b/esapi/api.xpack.ilm.move_to_step.go index b00a1c3a30..6d633d3580 100644 --- a/esapi/api.xpack.ilm.move_to_step.go +++ b/esapi/api.xpack.ilm.move_to_step.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.put_lifecycle.go b/esapi/api.xpack.ilm.put_lifecycle.go index 299f80a481..05f78d2053 100644 --- a/esapi/api.xpack.ilm.put_lifecycle.go +++ b/esapi/api.xpack.ilm.put_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.remove_policy.go b/esapi/api.xpack.ilm.remove_policy.go index a5ec25158a..1a3ece0e14 100644 --- a/esapi/api.xpack.ilm.remove_policy.go +++ b/esapi/api.xpack.ilm.remove_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.retry.go b/esapi/api.xpack.ilm.retry.go index 94f0cee4f6..0c3d21ee6c 100644 --- a/esapi/api.xpack.ilm.retry.go +++ b/esapi/api.xpack.ilm.retry.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.start.go b/esapi/api.xpack.ilm.start.go index 429336f3f8..a6c37532c4 100644 --- a/esapi/api.xpack.ilm.start.go +++ b/esapi/api.xpack.ilm.start.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ilm.stop.go b/esapi/api.xpack.ilm.stop.go index 1d1a772187..3a43fd9d6b 100644 --- a/esapi/api.xpack.ilm.stop.go +++ b/esapi/api.xpack.ilm.stop.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.indices.create_data_stream.go b/esapi/api.xpack.indices.create_data_stream.go index be1d521e64..4a0056e98c 100644 --- a/esapi/api.xpack.indices.create_data_stream.go +++ b/esapi/api.xpack.indices.create_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.indices.data_streams_stats.go b/esapi/api.xpack.indices.data_streams_stats.go index 0576f1e67f..038056481f 100644 --- a/esapi/api.xpack.indices.data_streams_stats.go +++ b/esapi/api.xpack.indices.data_streams_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.indices.delete_data_stream.go b/esapi/api.xpack.indices.delete_data_stream.go index 4efa32233b..6aa44348fd 100644 --- a/esapi/api.xpack.indices.delete_data_stream.go +++ b/esapi/api.xpack.indices.delete_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.indices.get_data_stream.go b/esapi/api.xpack.indices.get_data_stream.go index 6151526ff3..97b0f21b26 100644 --- a/esapi/api.xpack.indices.get_data_stream.go +++ b/esapi/api.xpack.indices.get_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -56,6 +56,7 @@ type IndicesGetDataStreamRequest struct { ExpandWildcards string IncludeDefaults *bool MasterTimeout time.Duration + Verbose *bool Pretty bool Human bool @@ -114,6 +115,10 @@ func (r IndicesGetDataStreamRequest) Do(providedCtx context.Context, transport T params["master_timeout"] = formatDuration(r.MasterTimeout) } + if r.Verbose != nil { + params["verbose"] = strconv.FormatBool(*r.Verbose) + } + if r.Pretty { params["pretty"] = "true" } @@ -220,6 +225,13 @@ func (f IndicesGetDataStream) WithMasterTimeout(v time.Duration) func(*IndicesGe } } +// WithVerbose - whether the maximum timestamp for each data stream should be calculated and returned (default: false). +func (f IndicesGetDataStream) WithVerbose(v bool) func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + r.Verbose = &v + } +} + // WithPretty makes the response body pretty-printed. 
func (f IndicesGetDataStream) WithPretty() func(*IndicesGetDataStreamRequest) { return func(r *IndicesGetDataStreamRequest) { diff --git a/esapi/api.xpack.indices.migrate_to_data_stream.go b/esapi/api.xpack.indices.migrate_to_data_stream.go index bcfc3d818c..1429deeb20 100644 --- a/esapi/api.xpack.indices.migrate_to_data_stream.go +++ b/esapi/api.xpack.indices.migrate_to_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.indices.promote_data_stream.go b/esapi/api.xpack.indices.promote_data_stream.go index 253c709d10..86bbb1b6b2 100644 --- a/esapi/api.xpack.indices.promote_data_stream.go +++ b/esapi/api.xpack.indices.promote_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.indices.reload_search_analyzers.go b/esapi/api.xpack.indices.reload_search_analyzers.go index 1833d962a6..c3e31bcc45 100644 --- a/esapi/api.xpack.indices.reload_search_analyzers.go +++ b/esapi/api.xpack.indices.reload_search_analyzers.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.indices.unfreeze.go b/esapi/api.xpack.indices.unfreeze.go index d0614d11f9..8bfe8f4dbe 100644 --- a/esapi/api.xpack.indices.unfreeze.go +++ b/esapi/api.xpack.indices.unfreeze.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.delete.go b/esapi/api.xpack.license.delete.go index 993bcc9cde..bc54237841 100644 --- a/esapi/api.xpack.license.delete.go +++ b/esapi/api.xpack.license.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.get.go b/esapi/api.xpack.license.get.go index 073573e57a..a5f2e4dd2d 100644 --- a/esapi/api.xpack.license.get.go +++ b/esapi/api.xpack.license.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.get_basic_status.go b/esapi/api.xpack.license.get_basic_status.go index f5768acd6c..7fa614c065 100644 --- a/esapi/api.xpack.license.get_basic_status.go +++ b/esapi/api.xpack.license.get_basic_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.get_trial_status.go b/esapi/api.xpack.license.get_trial_status.go index 50aecfcbe1..e98f1676c6 100644 --- a/esapi/api.xpack.license.get_trial_status.go +++ b/esapi/api.xpack.license.get_trial_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.post.go b/esapi/api.xpack.license.post.go index da5ca77004..0ba90ccfdc 100644 --- a/esapi/api.xpack.license.post.go +++ b/esapi/api.xpack.license.post.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.post_start_basic.go b/esapi/api.xpack.license.post_start_basic.go index 691ce447c7..6c679bd55e 100644 --- a/esapi/api.xpack.license.post_start_basic.go +++ b/esapi/api.xpack.license.post_start_basic.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.license.post_start_trial.go b/esapi/api.xpack.license.post_start_trial.go index c71b21ddfb..b80c9d5322 100644 --- a/esapi/api.xpack.license.post_start_trial.go +++ b/esapi/api.xpack.license.post_start_trial.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -53,7 +53,6 @@ type LicensePostStartTrial func(o ...func(*LicensePostStartTrialRequest)) (*Resp type LicensePostStartTrialRequest struct { Acknowledge *bool MasterTimeout time.Duration - Timeout time.Duration DocumentType string Pretty bool @@ -101,10 +100,6 @@ func (r LicensePostStartTrialRequest) Do(providedCtx context.Context, transport params["master_timeout"] = formatDuration(r.MasterTimeout) } - if r.Timeout != 0 { - params["timeout"] = formatDuration(r.Timeout) - } - if r.DocumentType != "" { params["type"] = r.DocumentType } @@ -201,13 +196,6 @@ func (f LicensePostStartTrial) WithMasterTimeout(v time.Duration) func(*LicenseP } } -// WithTimeout - timeout for acknowledgement of update from all nodes in cluster. -func (f LicensePostStartTrial) WithTimeout(v time.Duration) func(*LicensePostStartTrialRequest) { - return func(r *LicensePostStartTrialRequest) { - r.Timeout = v - } -} - // WithDocumentType - the type of trial license to generate (default: "trial"). func (f LicensePostStartTrial) WithDocumentType(v string) func(*LicensePostStartTrialRequest) { return func(r *LicensePostStartTrialRequest) { diff --git a/esapi/api.xpack.logstash.delete_pipeline.go b/esapi/api.xpack.logstash.delete_pipeline.go index 343a02f717..773b28b742 100644 --- a/esapi/api.xpack.logstash.delete_pipeline.go +++ b/esapi/api.xpack.logstash.delete_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.logstash.get_pipeline.go b/esapi/api.xpack.logstash.get_pipeline.go index 7bdc831ca6..fe218051fc 100644 --- a/esapi/api.xpack.logstash.get_pipeline.go +++ b/esapi/api.xpack.logstash.get_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.logstash.put_pipeline.go b/esapi/api.xpack.logstash.put_pipeline.go index 0daabf6a49..86e8e18daf 100644 --- a/esapi/api.xpack.logstash.put_pipeline.go +++ b/esapi/api.xpack.logstash.put_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.migration.deprecations.go b/esapi/api.xpack.migration.deprecations.go index cf67b78591..477991665a 100644 --- a/esapi/api.xpack.migration.deprecations.go +++ b/esapi/api.xpack.migration.deprecations.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.migration.get_feature_upgrade_status.go b/esapi/api.xpack.migration.get_feature_upgrade_status.go index 77ed8afad4..6fb471ce91 100644 --- a/esapi/api.xpack.migration.get_feature_upgrade_status.go +++ b/esapi/api.xpack.migration.get_feature_upgrade_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.migration.post_feature_upgrade.go b/esapi/api.xpack.migration.post_feature_upgrade.go index cda22ef994..3159ddd5ea 100644 --- a/esapi/api.xpack.migration.post_feature_upgrade.go +++ b/esapi/api.xpack.migration.post_feature_upgrade.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go b/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go index 63d100b96d..e35f49acd7 100644 --- a/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go +++ b/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.close_job.go b/esapi/api.xpack.ml.close_job.go index 3e9cf686d4..b4d7412028 100644 --- a/esapi/api.xpack.ml.close_job.go +++ b/esapi/api.xpack.ml.close_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_calendar.go b/esapi/api.xpack.ml.delete_calendar.go index 194693a113..b84af24907 100644 --- a/esapi/api.xpack.ml.delete_calendar.go +++ b/esapi/api.xpack.ml.delete_calendar.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_calendar_event.go b/esapi/api.xpack.ml.delete_calendar_event.go index d0dec58aa7..a74d196677 100644 --- a/esapi/api.xpack.ml.delete_calendar_event.go +++ b/esapi/api.xpack.ml.delete_calendar_event.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_calendar_job.go b/esapi/api.xpack.ml.delete_calendar_job.go index 5f3cecaaf1..575fcf6f5b 100644 --- a/esapi/api.xpack.ml.delete_calendar_job.go +++ b/esapi/api.xpack.ml.delete_calendar_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_data_frame_analytics.go b/esapi/api.xpack.ml.delete_data_frame_analytics.go index 91b14b8bd4..3d866dc668 100644 --- a/esapi/api.xpack.ml.delete_data_frame_analytics.go +++ b/esapi/api.xpack.ml.delete_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_datafeed.go b/esapi/api.xpack.ml.delete_datafeed.go index 5b6a52a489..591519ac60 100644 --- a/esapi/api.xpack.ml.delete_datafeed.go +++ b/esapi/api.xpack.ml.delete_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_expired_data.go b/esapi/api.xpack.ml.delete_expired_data.go index f63ccc8041..bfc5ad2c2e 100644 --- a/esapi/api.xpack.ml.delete_expired_data.go +++ b/esapi/api.xpack.ml.delete_expired_data.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_filter.go b/esapi/api.xpack.ml.delete_filter.go index 4f9f55db54..2e7e8494f1 100644 --- a/esapi/api.xpack.ml.delete_filter.go +++ b/esapi/api.xpack.ml.delete_filter.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_forecast.go b/esapi/api.xpack.ml.delete_forecast.go index f7e48836f6..8a9b15e0f1 100644 --- a/esapi/api.xpack.ml.delete_forecast.go +++ b/esapi/api.xpack.ml.delete_forecast.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_job.go b/esapi/api.xpack.ml.delete_job.go index 0f3653430c..144bd46d64 100644 --- a/esapi/api.xpack.ml.delete_job.go +++ b/esapi/api.xpack.ml.delete_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_model_snapshot.go b/esapi/api.xpack.ml.delete_model_snapshot.go index 4fd11e5c3f..2739876f66 100644 --- a/esapi/api.xpack.ml.delete_model_snapshot.go +++ b/esapi/api.xpack.ml.delete_model_snapshot.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_trained_model.go b/esapi/api.xpack.ml.delete_trained_model.go index 660ddf1b44..57003bfb6b 100644 --- a/esapi/api.xpack.ml.delete_trained_model.go +++ b/esapi/api.xpack.ml.delete_trained_model.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.delete_trained_model_alias.go b/esapi/api.xpack.ml.delete_trained_model_alias.go index 855f44bb9e..3b786681ad 100644 --- a/esapi/api.xpack.ml.delete_trained_model_alias.go +++ b/esapi/api.xpack.ml.delete_trained_model_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.estimate_model_memory.go b/esapi/api.xpack.ml.estimate_model_memory.go index 90abe9b67e..28331f1de6 100644 --- a/esapi/api.xpack.ml.estimate_model_memory.go +++ b/esapi/api.xpack.ml.estimate_model_memory.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.evaluate_data_frame.go b/esapi/api.xpack.ml.evaluate_data_frame.go index 4f68841e75..683e502e59 100644 --- a/esapi/api.xpack.ml.evaluate_data_frame.go +++ b/esapi/api.xpack.ml.evaluate_data_frame.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.explain_data_frame_analytics.go b/esapi/api.xpack.ml.explain_data_frame_analytics.go index 0e8d5d7306..dd2a548ca5 100644 --- a/esapi/api.xpack.ml.explain_data_frame_analytics.go +++ b/esapi/api.xpack.ml.explain_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.flush_job.go b/esapi/api.xpack.ml.flush_job.go index 5f2394c643..26b043caf1 100644 --- a/esapi/api.xpack.ml.flush_job.go +++ b/esapi/api.xpack.ml.flush_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.forecast.go b/esapi/api.xpack.ml.forecast.go index c290711c38..6e6fdfb29b 100644 --- a/esapi/api.xpack.ml.forecast.go +++ b/esapi/api.xpack.ml.forecast.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_buckets.go b/esapi/api.xpack.ml.get_buckets.go index aae1350c18..97adb7a141 100644 --- a/esapi/api.xpack.ml.get_buckets.go +++ b/esapi/api.xpack.ml.get_buckets.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_calendar_events.go b/esapi/api.xpack.ml.get_calendar_events.go index 319b35e6dd..0f7d32b120 100644 --- a/esapi/api.xpack.ml.get_calendar_events.go +++ b/esapi/api.xpack.ml.get_calendar_events.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_calendars.go b/esapi/api.xpack.ml.get_calendars.go index 4ffe1501b2..e273487878 100644 --- a/esapi/api.xpack.ml.get_calendars.go +++ b/esapi/api.xpack.ml.get_calendars.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_categories.go b/esapi/api.xpack.ml.get_categories.go index c30c544438..ce4ce38197 100644 --- a/esapi/api.xpack.ml.get_categories.go +++ b/esapi/api.xpack.ml.get_categories.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_data_frame_analytics.go b/esapi/api.xpack.ml.get_data_frame_analytics.go index c25a904213..6beeb5a6b5 100644 --- a/esapi/api.xpack.ml.get_data_frame_analytics.go +++ b/esapi/api.xpack.ml.get_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_data_frame_analytics_stats.go b/esapi/api.xpack.ml.get_data_frame_analytics_stats.go index fdcb8d41c5..516c6df144 100644 --- a/esapi/api.xpack.ml.get_data_frame_analytics_stats.go +++ b/esapi/api.xpack.ml.get_data_frame_analytics_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_datafeed_stats.go b/esapi/api.xpack.ml.get_datafeed_stats.go index 3fc774f3e1..004e7faba3 100644 --- a/esapi/api.xpack.ml.get_datafeed_stats.go +++ b/esapi/api.xpack.ml.get_datafeed_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_datafeeds.go b/esapi/api.xpack.ml.get_datafeeds.go index f9fc08027e..24daaa502c 100644 --- a/esapi/api.xpack.ml.get_datafeeds.go +++ b/esapi/api.xpack.ml.get_datafeeds.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_filters.go b/esapi/api.xpack.ml.get_filters.go index daf0410e88..5b4be15265 100644 --- a/esapi/api.xpack.ml.get_filters.go +++ b/esapi/api.xpack.ml.get_filters.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_influencers.go b/esapi/api.xpack.ml.get_influencers.go index 01024e2d89..c78c6a956a 100644 --- a/esapi/api.xpack.ml.get_influencers.go +++ b/esapi/api.xpack.ml.get_influencers.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_job_stats.go b/esapi/api.xpack.ml.get_job_stats.go index 0e24ef70b3..9269c7fe10 100644 --- a/esapi/api.xpack.ml.get_job_stats.go +++ b/esapi/api.xpack.ml.get_job_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_jobs.go b/esapi/api.xpack.ml.get_jobs.go index 76df09282e..d168df4372 100644 --- a/esapi/api.xpack.ml.get_jobs.go +++ b/esapi/api.xpack.ml.get_jobs.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_memory_stats.go b/esapi/api.xpack.ml.get_memory_stats.go index 5b9c3b5ef8..1338e90f35 100644 --- a/esapi/api.xpack.ml.get_memory_stats.go +++ b/esapi/api.xpack.ml.get_memory_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go b/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go index bb30dd2f3e..e03d1ce07a 100644 --- a/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go +++ b/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_model_snapshots.go b/esapi/api.xpack.ml.get_model_snapshots.go index f3b607fec2..c7ec2ee5be 100644 --- a/esapi/api.xpack.ml.get_model_snapshots.go +++ b/esapi/api.xpack.ml.get_model_snapshots.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_overall_buckets.go b/esapi/api.xpack.ml.get_overall_buckets.go index 5e98222931..35f8b8dfa2 100644 --- a/esapi/api.xpack.ml.get_overall_buckets.go +++ b/esapi/api.xpack.ml.get_overall_buckets.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_records.go b/esapi/api.xpack.ml.get_records.go index 5d7e04fe81..c922c2d9a3 100644 --- a/esapi/api.xpack.ml.get_records.go +++ b/esapi/api.xpack.ml.get_records.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_trained_models.go b/esapi/api.xpack.ml.get_trained_models.go index 9889f6f1f6..45d428d619 100644 --- a/esapi/api.xpack.ml.get_trained_models.go +++ b/esapi/api.xpack.ml.get_trained_models.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.get_trained_models_stats.go b/esapi/api.xpack.ml.get_trained_models_stats.go index 0d51c06d5c..d2a7cb5d4b 100644 --- a/esapi/api.xpack.ml.get_trained_models_stats.go +++ b/esapi/api.xpack.ml.get_trained_models_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.infer_trained_model.go b/esapi/api.xpack.ml.infer_trained_model.go index e7e4619b9e..1bace44821 100644 --- a/esapi/api.xpack.ml.infer_trained_model.go +++ b/esapi/api.xpack.ml.infer_trained_model.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.info.go b/esapi/api.xpack.ml.info.go index a3c238084e..8862e753a4 100644 --- a/esapi/api.xpack.ml.info.go +++ b/esapi/api.xpack.ml.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.open_job.go b/esapi/api.xpack.ml.open_job.go index 1f03cce36c..54b8b9b404 100644 --- a/esapi/api.xpack.ml.open_job.go +++ b/esapi/api.xpack.ml.open_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.post_calendar_events.go b/esapi/api.xpack.ml.post_calendar_events.go index d0cc1a479f..1b8d497c4f 100644 --- a/esapi/api.xpack.ml.post_calendar_events.go +++ b/esapi/api.xpack.ml.post_calendar_events.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.post_data.go b/esapi/api.xpack.ml.post_data.go index cf3767351d..e97198914c 100644 --- a/esapi/api.xpack.ml.post_data.go +++ b/esapi/api.xpack.ml.post_data.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.preview_data_frame_analytics.go b/esapi/api.xpack.ml.preview_data_frame_analytics.go index 7de623cf56..36dd5a0b8d 100644 --- a/esapi/api.xpack.ml.preview_data_frame_analytics.go +++ b/esapi/api.xpack.ml.preview_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.preview_datafeed.go b/esapi/api.xpack.ml.preview_datafeed.go index ec9dac2462..e05fbe3539 100644 --- a/esapi/api.xpack.ml.preview_datafeed.go +++ b/esapi/api.xpack.ml.preview_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_calendar.go b/esapi/api.xpack.ml.put_calendar.go index c7efd600b2..d28d637a9e 100644 --- a/esapi/api.xpack.ml.put_calendar.go +++ b/esapi/api.xpack.ml.put_calendar.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_calendar_job.go b/esapi/api.xpack.ml.put_calendar_job.go index d955776cbd..4fc81239b9 100644 --- a/esapi/api.xpack.ml.put_calendar_job.go +++ b/esapi/api.xpack.ml.put_calendar_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_data_frame_analytics.go b/esapi/api.xpack.ml.put_data_frame_analytics.go index dd6086abf2..b84548120e 100644 --- a/esapi/api.xpack.ml.put_data_frame_analytics.go +++ b/esapi/api.xpack.ml.put_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_datafeed.go b/esapi/api.xpack.ml.put_datafeed.go index 46f550d63e..759d792bfc 100644 --- a/esapi/api.xpack.ml.put_datafeed.go +++ b/esapi/api.xpack.ml.put_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_filter.go b/esapi/api.xpack.ml.put_filter.go index 83479aa417..fec198e794 100644 --- a/esapi/api.xpack.ml.put_filter.go +++ b/esapi/api.xpack.ml.put_filter.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_job.go b/esapi/api.xpack.ml.put_job.go index 47e6316c8e..e639bfe966 100644 --- a/esapi/api.xpack.ml.put_job.go +++ b/esapi/api.xpack.ml.put_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_trained_model.go b/esapi/api.xpack.ml.put_trained_model.go index bcfcd79136..e6fad595e9 100644 --- a/esapi/api.xpack.ml.put_trained_model.go +++ b/esapi/api.xpack.ml.put_trained_model.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_trained_model_alias.go b/esapi/api.xpack.ml.put_trained_model_alias.go index b008226531..c87da5c092 100644 --- a/esapi/api.xpack.ml.put_trained_model_alias.go +++ b/esapi/api.xpack.ml.put_trained_model_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_trained_model_definition_part.go b/esapi/api.xpack.ml.put_trained_model_definition_part.go index 8e806cf9d0..7004b01eec 100644 --- a/esapi/api.xpack.ml.put_trained_model_definition_part.go +++ b/esapi/api.xpack.ml.put_trained_model_definition_part.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.put_trained_model_vocabulary.go b/esapi/api.xpack.ml.put_trained_model_vocabulary.go index 48f08dc0bf..0563d323ff 100644 --- a/esapi/api.xpack.ml.put_trained_model_vocabulary.go +++ b/esapi/api.xpack.ml.put_trained_model_vocabulary.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.reset_job.go b/esapi/api.xpack.ml.reset_job.go index 1999f9bf6c..00fca38e36 100644 --- a/esapi/api.xpack.ml.reset_job.go +++ b/esapi/api.xpack.ml.reset_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.revert_model_snapshot.go b/esapi/api.xpack.ml.revert_model_snapshot.go index bc6b3fa63d..dd9eddaa35 100644 --- a/esapi/api.xpack.ml.revert_model_snapshot.go +++ b/esapi/api.xpack.ml.revert_model_snapshot.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.set_upgrade_mode.go b/esapi/api.xpack.ml.set_upgrade_mode.go index 0f45117209..f78131b692 100644 --- a/esapi/api.xpack.ml.set_upgrade_mode.go +++ b/esapi/api.xpack.ml.set_upgrade_mode.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.start_data_frame_analytics.go b/esapi/api.xpack.ml.start_data_frame_analytics.go index dbc57ef4d2..1b82d06960 100644 --- a/esapi/api.xpack.ml.start_data_frame_analytics.go +++ b/esapi/api.xpack.ml.start_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.start_datafeed.go b/esapi/api.xpack.ml.start_datafeed.go index 8f02eadd4c..402fa68e61 100644 --- a/esapi/api.xpack.ml.start_datafeed.go +++ b/esapi/api.xpack.ml.start_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.start_trained_model_deployment.go b/esapi/api.xpack.ml.start_trained_model_deployment.go index fc008407e3..e8939beeaf 100644 --- a/esapi/api.xpack.ml.start_trained_model_deployment.go +++ b/esapi/api.xpack.ml.start_trained_model_deployment.go @@ -15,12 +15,13 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi import ( "context" + "io" "net/http" "strconv" "strings" @@ -51,6 +52,8 @@ type MLStartTrainedModelDeployment func(model_id string, o ...func(*MLStartTrain // MLStartTrainedModelDeploymentRequest configures the ML Start Trained Model Deployment API request. 
type MLStartTrainedModelDeploymentRequest struct { + Body io.Reader + ModelID string CacheSize string @@ -159,7 +162,7 @@ func (r MLStartTrainedModelDeploymentRequest) Do(providedCtx context.Context, tr params["filter_path"] = strings.Join(r.FilterPath, ",") } - req, err := newRequest(method, path.String(), nil) + req, err := newRequest(method, path.String(), r.Body) if err != nil { if instrument, ok := r.instrument.(Instrumentation); ok { instrument.RecordError(ctx, err) @@ -187,12 +190,19 @@ func (r MLStartTrainedModelDeploymentRequest) Do(providedCtx context.Context, tr } } + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + if ctx != nil { req = req.WithContext(ctx) } if instrument, ok := r.instrument.(Instrumentation); ok { instrument.BeforeRequest(req, "ml.start_trained_model_deployment") + if reader := instrument.RecordRequestBody(ctx, "ml.start_trained_model_deployment", r.Body); reader != nil { + req.Body = reader + } } res, err := transport.Perform(req) if instrument, ok := r.instrument.(Instrumentation); ok { @@ -221,6 +231,13 @@ func (f MLStartTrainedModelDeployment) WithContext(v context.Context) func(*MLSt } } +// WithBody - The settings for the trained model deployment. +func (f MLStartTrainedModelDeployment) WithBody(v io.Reader) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.Body = v + } +} + // WithCacheSize - a byte-size value for configuring the inference cache size. for example, 20mb.. 
func (f MLStartTrainedModelDeployment) WithCacheSize(v string) func(*MLStartTrainedModelDeploymentRequest) { return func(r *MLStartTrainedModelDeploymentRequest) { diff --git a/esapi/api.xpack.ml.stop_data_frame_analytics.go b/esapi/api.xpack.ml.stop_data_frame_analytics.go index a7aa00bc2f..24292a74a1 100644 --- a/esapi/api.xpack.ml.stop_data_frame_analytics.go +++ b/esapi/api.xpack.ml.stop_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.stop_datafeed.go b/esapi/api.xpack.ml.stop_datafeed.go index 0a51e2b5f0..58bd424e85 100644 --- a/esapi/api.xpack.ml.stop_datafeed.go +++ b/esapi/api.xpack.ml.stop_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.stop_trained_model_deployment.go b/esapi/api.xpack.ml.stop_trained_model_deployment.go index 4a25f46955..ac6105a1cb 100644 --- a/esapi/api.xpack.ml.stop_trained_model_deployment.go +++ b/esapi/api.xpack.ml.stop_trained_model_deployment.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.update_data_frame_analytics.go b/esapi/api.xpack.ml.update_data_frame_analytics.go index 9a38e1a897..34537c68e0 100644 --- a/esapi/api.xpack.ml.update_data_frame_analytics.go +++ b/esapi/api.xpack.ml.update_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.update_datafeed.go b/esapi/api.xpack.ml.update_datafeed.go index 486c7dc1b7..d42b335afb 100644 --- a/esapi/api.xpack.ml.update_datafeed.go +++ b/esapi/api.xpack.ml.update_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.update_filter.go b/esapi/api.xpack.ml.update_filter.go index 90e3930452..d4b74bef38 100644 --- a/esapi/api.xpack.ml.update_filter.go +++ b/esapi/api.xpack.ml.update_filter.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.update_job.go b/esapi/api.xpack.ml.update_job.go index 540f2145e3..022030a8bc 100644 --- a/esapi/api.xpack.ml.update_job.go +++ b/esapi/api.xpack.ml.update_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.update_model_snapshot.go b/esapi/api.xpack.ml.update_model_snapshot.go index ec5078297c..bb2d2811b7 100644 --- a/esapi/api.xpack.ml.update_model_snapshot.go +++ b/esapi/api.xpack.ml.update_model_snapshot.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.update_trained_model_deployment.go b/esapi/api.xpack.ml.update_trained_model_deployment.go index fb5345319f..3d7b8500b3 100644 --- a/esapi/api.xpack.ml.update_trained_model_deployment.go +++ b/esapi/api.xpack.ml.update_trained_model_deployment.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.upgrade_job_snapshot.go b/esapi/api.xpack.ml.upgrade_job_snapshot.go index 33b6a18933..36fd003535 100644 --- a/esapi/api.xpack.ml.upgrade_job_snapshot.go +++ b/esapi/api.xpack.ml.upgrade_job_snapshot.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.validate.go b/esapi/api.xpack.ml.validate.go index 851acb6790..aa9f8c23c8 100644 --- a/esapi/api.xpack.ml.validate.go +++ b/esapi/api.xpack.ml.validate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ml.validate_detector.go b/esapi/api.xpack.ml.validate_detector.go index a74f8ffd1d..1ebe87ee06 100644 --- a/esapi/api.xpack.ml.validate_detector.go +++ b/esapi/api.xpack.ml.validate_detector.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.monitoring.bulk.go b/esapi/api.xpack.monitoring.bulk.go index f80c59d3c7..5673132785 100644 --- a/esapi/api.xpack.monitoring.bulk.go +++ b/esapi/api.xpack.monitoring.bulk.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.open_point_in_time.go b/esapi/api.xpack.open_point_in_time.go index 210b811228..de124c840b 100644 --- a/esapi/api.xpack.open_point_in_time.go +++ b/esapi/api.xpack.open_point_in_time.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -56,11 +56,12 @@ type OpenPointInTimeRequest struct { Body io.Reader - ExpandWildcards string - IgnoreUnavailable *bool - KeepAlive string - Preference string - Routing string + AllowPartialSearchResults *bool + ExpandWildcards string + IgnoreUnavailable *bool + KeepAlive string + Preference string + Routing string Pretty bool Human bool @@ -109,6 +110,10 @@ func (r OpenPointInTimeRequest) Do(providedCtx context.Context, transport Transp params = make(map[string]string) + if r.AllowPartialSearchResults != nil { + params["allow_partial_search_results"] = strconv.FormatBool(*r.AllowPartialSearchResults) + } + if r.ExpandWildcards != "" { params["expand_wildcards"] = r.ExpandWildcards } @@ -221,6 +226,13 @@ func (f OpenPointInTime) WithBody(v io.Reader) func(*OpenPointInTimeRequest) { } } +// WithAllowPartialSearchResults - specify whether to tolerate shards missing when creating the point-in-time, or otherwise throw an exception. (default: false). +func (f OpenPointInTime) WithAllowPartialSearchResults(v bool) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.AllowPartialSearchResults = &v + } +} + // WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. func (f OpenPointInTime) WithExpandWildcards(v string) func(*OpenPointInTimeRequest) { return func(r *OpenPointInTimeRequest) { diff --git a/esapi/api.xpack.profiling.flamegraph.go b/esapi/api.xpack.profiling.flamegraph.go index 7335792c01..b1ccd56e4f 100644 --- a/esapi/api.xpack.profiling.flamegraph.go +++ b/esapi/api.xpack.profiling.flamegraph.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.delete_job.go b/esapi/api.xpack.rollup.delete_job.go index 2fa13e96e7..67f1bb1a1e 100644 --- a/esapi/api.xpack.rollup.delete_job.go +++ b/esapi/api.xpack.rollup.delete_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.get_jobs.go b/esapi/api.xpack.rollup.get_jobs.go index 42d0827dd4..e46f14c54d 100644 --- a/esapi/api.xpack.rollup.get_jobs.go +++ b/esapi/api.xpack.rollup.get_jobs.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.get_rollup_caps.go b/esapi/api.xpack.rollup.get_rollup_caps.go index eeb2beba5f..9bf41dd485 100644 --- a/esapi/api.xpack.rollup.get_rollup_caps.go +++ b/esapi/api.xpack.rollup.get_rollup_caps.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.get_rollup_index_caps.go b/esapi/api.xpack.rollup.get_rollup_index_caps.go index dc97b76eb1..614ad12d1c 100644 --- a/esapi/api.xpack.rollup.get_rollup_index_caps.go +++ b/esapi/api.xpack.rollup.get_rollup_index_caps.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.put_job.go b/esapi/api.xpack.rollup.put_job.go index 9ad730413d..98cf8885d9 100644 --- a/esapi/api.xpack.rollup.put_job.go +++ b/esapi/api.xpack.rollup.put_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.rollup_search.go b/esapi/api.xpack.rollup.rollup_search.go index 9d37d79b24..dc4776750d 100644 --- a/esapi/api.xpack.rollup.rollup_search.go +++ b/esapi/api.xpack.rollup.rollup_search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.start_job.go b/esapi/api.xpack.rollup.start_job.go index bd28c9be20..764f6981e2 100644 --- a/esapi/api.xpack.rollup.start_job.go +++ b/esapi/api.xpack.rollup.start_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.rollup.stop_job.go b/esapi/api.xpack.rollup.stop_job.go index 540c069c4d..8cbef52207 100644 --- a/esapi/api.xpack.rollup.stop_job.go +++ b/esapi/api.xpack.rollup.stop_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.searchable_snapshots.cache_stats.go b/esapi/api.xpack.searchable_snapshots.cache_stats.go index b881ddce9a..6bad81750e 100644 --- a/esapi/api.xpack.searchable_snapshots.cache_stats.go +++ b/esapi/api.xpack.searchable_snapshots.cache_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.searchable_snapshots.clear_cache.go b/esapi/api.xpack.searchable_snapshots.clear_cache.go index cd0a69f875..7a00e35d96 100644 --- a/esapi/api.xpack.searchable_snapshots.clear_cache.go +++ b/esapi/api.xpack.searchable_snapshots.clear_cache.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.searchable_snapshots.mount.go b/esapi/api.xpack.searchable_snapshots.mount.go index 91aa50429a..765e2b38b7 100644 --- a/esapi/api.xpack.searchable_snapshots.mount.go +++ b/esapi/api.xpack.searchable_snapshots.mount.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.searchable_snapshots.stats.go b/esapi/api.xpack.searchable_snapshots.stats.go index a2762f1e39..dbba53a6b1 100644 --- a/esapi/api.xpack.searchable_snapshots.stats.go +++ b/esapi/api.xpack.searchable_snapshots.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.activate_user_profile.go b/esapi/api.xpack.security.activate_user_profile.go index 5be9dc3254..1574ac5647 100644 --- a/esapi/api.xpack.security.activate_user_profile.go +++ b/esapi/api.xpack.security.activate_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.authenticate.go b/esapi/api.xpack.security.authenticate.go index 1cf6539414..de0e07857f 100644 --- a/esapi/api.xpack.security.authenticate.go +++ b/esapi/api.xpack.security.authenticate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.bulk_delete_role.go b/esapi/api.xpack.security.bulk_delete_role.go index 3790b9b590..3231c73fca 100644 --- a/esapi/api.xpack.security.bulk_delete_role.go +++ b/esapi/api.xpack.security.bulk_delete_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.bulk_put_role.go b/esapi/api.xpack.security.bulk_put_role.go index 5c5e4a5202..6928d0f83f 100644 --- a/esapi/api.xpack.security.bulk_put_role.go +++ b/esapi/api.xpack.security.bulk_put_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.bulk_update_api_keys.go b/esapi/api.xpack.security.bulk_update_api_keys.go index 248e7f128f..3b918dfdf2 100644 --- a/esapi/api.xpack.security.bulk_update_api_keys.go +++ b/esapi/api.xpack.security.bulk_update_api_keys.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.change_password.go b/esapi/api.xpack.security.change_password.go index f2cab71623..d5c5629cfb 100644 --- a/esapi/api.xpack.security.change_password.go +++ b/esapi/api.xpack.security.change_password.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.clear_api_key_cache.go b/esapi/api.xpack.security.clear_api_key_cache.go index 3fee069eda..8621e282c3 100644 --- a/esapi/api.xpack.security.clear_api_key_cache.go +++ b/esapi/api.xpack.security.clear_api_key_cache.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.clear_cached_privileges.go b/esapi/api.xpack.security.clear_cached_privileges.go index 5ed1a4778f..59c5a5f056 100644 --- a/esapi/api.xpack.security.clear_cached_privileges.go +++ b/esapi/api.xpack.security.clear_cached_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.clear_cached_realms.go b/esapi/api.xpack.security.clear_cached_realms.go index 506d038de6..344b5a9e9c 100644 --- a/esapi/api.xpack.security.clear_cached_realms.go +++ b/esapi/api.xpack.security.clear_cached_realms.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.clear_cached_roles.go b/esapi/api.xpack.security.clear_cached_roles.go index 9f1041738d..06bd152b00 100644 --- a/esapi/api.xpack.security.clear_cached_roles.go +++ b/esapi/api.xpack.security.clear_cached_roles.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.clear_cached_service_tokens.go b/esapi/api.xpack.security.clear_cached_service_tokens.go index 3933616bbd..1d8e0206af 100644 --- a/esapi/api.xpack.security.clear_cached_service_tokens.go +++ b/esapi/api.xpack.security.clear_cached_service_tokens.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.create_api_key.go b/esapi/api.xpack.security.create_api_key.go index 76f57e254b..0149b32380 100644 --- a/esapi/api.xpack.security.create_api_key.go +++ b/esapi/api.xpack.security.create_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.create_cross_cluster_api_key.go b/esapi/api.xpack.security.create_cross_cluster_api_key.go index aca03ed95e..dc00a7679e 100644 --- a/esapi/api.xpack.security.create_cross_cluster_api_key.go +++ b/esapi/api.xpack.security.create_cross_cluster_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.create_service_token.go b/esapi/api.xpack.security.create_service_token.go index 5ef29cf4f3..0b9ede473a 100644 --- a/esapi/api.xpack.security.create_service_token.go +++ b/esapi/api.xpack.security.create_service_token.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.delegate_pki.go b/esapi/api.xpack.security.delegate_pki.go new file mode 100644 index 0000000000..5e228e8f9b --- /dev/null +++ b/esapi/api.xpack.security.delegate_pki.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.18.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityDelegatePkiFunc(t Transport) SecurityDelegatePki { + return func(body io.Reader, o ...func(*SecurityDelegatePkiRequest)) (*Response, error) { + var r = SecurityDelegatePkiRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityDelegatePki - Delegate PKI authentication. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delegate-pki-authentication.html. +type SecurityDelegatePki func(body io.Reader, o ...func(*SecurityDelegatePkiRequest)) (*Response, error) + +// SecurityDelegatePkiRequest configures the Security Delegate Pki API request. +type SecurityDelegatePkiRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityDelegatePkiRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delegate_pki") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/delegate_pki")) + path.WriteString("http://") + path.WriteString("/_security/delegate_pki") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.delegate_pki") + if reader := instrument.RecordRequestBody(ctx, "security.delegate_pki", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delegate_pki") + } + if err != nil { + if instrument, ok := 
r.instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityDelegatePki) WithContext(v context.Context) func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityDelegatePki) WithPretty() func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityDelegatePki) WithHuman() func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityDelegatePki) WithErrorTrace() func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityDelegatePki) WithFilterPath(v ...string) func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityDelegatePki) WithHeader(h map[string]string) func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SecurityDelegatePki) WithOpaqueID(s string) func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/esapi/api.xpack.security.delete_privileges.go b/esapi/api.xpack.security.delete_privileges.go index fd6f199c5c..f5fc5c4baa 100644 --- a/esapi/api.xpack.security.delete_privileges.go +++ b/esapi/api.xpack.security.delete_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.delete_role.go b/esapi/api.xpack.security.delete_role.go index e01f8930d6..ef9f35e282 100644 --- a/esapi/api.xpack.security.delete_role.go +++ b/esapi/api.xpack.security.delete_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.delete_role_mapping.go b/esapi/api.xpack.security.delete_role_mapping.go index 978423920f..c23c60a74d 100644 --- a/esapi/api.xpack.security.delete_role_mapping.go +++ b/esapi/api.xpack.security.delete_role_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.delete_service_token.go b/esapi/api.xpack.security.delete_service_token.go index 55cf6e8ccb..8d39bc7088 100644 --- a/esapi/api.xpack.security.delete_service_token.go +++ b/esapi/api.xpack.security.delete_service_token.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.delete_user.go b/esapi/api.xpack.security.delete_user.go index 81a9bf39c0..3e184efdb0 100644 --- a/esapi/api.xpack.security.delete_user.go +++ b/esapi/api.xpack.security.delete_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.disable_user.go b/esapi/api.xpack.security.disable_user.go index a119a31d33..fdbdd43e16 100644 --- a/esapi/api.xpack.security.disable_user.go +++ b/esapi/api.xpack.security.disable_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.disable_user_profile.go b/esapi/api.xpack.security.disable_user_profile.go index 04575c5df6..e0616fd4fe 100644 --- a/esapi/api.xpack.security.disable_user_profile.go +++ b/esapi/api.xpack.security.disable_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.enable_user.go b/esapi/api.xpack.security.enable_user.go index ae798b8871..a5f2ef455f 100644 --- a/esapi/api.xpack.security.enable_user.go +++ b/esapi/api.xpack.security.enable_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.enable_user_profile.go b/esapi/api.xpack.security.enable_user_profile.go index 3f10e39a5e..977a326d48 100644 --- a/esapi/api.xpack.security.enable_user_profile.go +++ b/esapi/api.xpack.security.enable_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.enroll_kibana.go b/esapi/api.xpack.security.enroll_kibana.go index b65c1f7477..01a865c4bb 100644 --- a/esapi/api.xpack.security.enroll_kibana.go +++ b/esapi/api.xpack.security.enroll_kibana.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.enroll_node.go b/esapi/api.xpack.security.enroll_node.go index 50e3962d03..c32a848458 100644 --- a/esapi/api.xpack.security.enroll_node.go +++ b/esapi/api.xpack.security.enroll_node.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_api_key.go b/esapi/api.xpack.security.get_api_key.go index c1fa14e21a..ecc466483e 100644 --- a/esapi/api.xpack.security.get_api_key.go +++ b/esapi/api.xpack.security.get_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_builtin_privileges.go b/esapi/api.xpack.security.get_builtin_privileges.go index abdb3b39bb..c507fa6a0f 100644 --- a/esapi/api.xpack.security.get_builtin_privileges.go +++ b/esapi/api.xpack.security.get_builtin_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_privileges.go b/esapi/api.xpack.security.get_privileges.go index fa21bccf53..1feb6b6a4f 100644 --- a/esapi/api.xpack.security.get_privileges.go +++ b/esapi/api.xpack.security.get_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_role.go b/esapi/api.xpack.security.get_role.go index 4b4d0fe91d..ec68ef8fb2 100644 --- a/esapi/api.xpack.security.get_role.go +++ b/esapi/api.xpack.security.get_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_role_mapping.go b/esapi/api.xpack.security.get_role_mapping.go index b89c0e6dfc..8afa2ddece 100644 --- a/esapi/api.xpack.security.get_role_mapping.go +++ b/esapi/api.xpack.security.get_role_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_service_accounts.go b/esapi/api.xpack.security.get_service_accounts.go index e125bf767d..7d9dae7eb8 100644 --- a/esapi/api.xpack.security.get_service_accounts.go +++ b/esapi/api.xpack.security.get_service_accounts.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_service_credentials.go b/esapi/api.xpack.security.get_service_credentials.go index 11e54f96cd..9464a930e3 100644 --- a/esapi/api.xpack.security.get_service_credentials.go +++ b/esapi/api.xpack.security.get_service_credentials.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_settings.go b/esapi/api.xpack.security.get_settings.go index 16cdde250e..62e2d1017f 100644 --- a/esapi/api.xpack.security.get_settings.go +++ b/esapi/api.xpack.security.get_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_token.go b/esapi/api.xpack.security.get_token.go index a7581472ad..5ad3c19625 100644 --- a/esapi/api.xpack.security.get_token.go +++ b/esapi/api.xpack.security.get_token.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_user.go b/esapi/api.xpack.security.get_user.go index 71b11ee7f7..d3a3dd87e7 100644 --- a/esapi/api.xpack.security.get_user.go +++ b/esapi/api.xpack.security.get_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_user_privileges.go b/esapi/api.xpack.security.get_user_privileges.go index f96994ab9a..ab433b547f 100644 --- a/esapi/api.xpack.security.get_user_privileges.go +++ b/esapi/api.xpack.security.get_user_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.get_user_profile.go b/esapi/api.xpack.security.get_user_profile.go index cfd7be6ec3..7c699ae50a 100644 --- a/esapi/api.xpack.security.get_user_profile.go +++ b/esapi/api.xpack.security.get_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.grant_api_key.go b/esapi/api.xpack.security.grant_api_key.go index 7f64353aff..c8e7c97acc 100644 --- a/esapi/api.xpack.security.grant_api_key.go +++ b/esapi/api.xpack.security.grant_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.has_privileges.go b/esapi/api.xpack.security.has_privileges.go index f460ab1c91..f9ef5ead5b 100644 --- a/esapi/api.xpack.security.has_privileges.go +++ b/esapi/api.xpack.security.has_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.has_privileges_user_profile.go b/esapi/api.xpack.security.has_privileges_user_profile.go index bc073c75e7..28c5967888 100644 --- a/esapi/api.xpack.security.has_privileges_user_profile.go +++ b/esapi/api.xpack.security.has_privileges_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.invalidate_api_key.go b/esapi/api.xpack.security.invalidate_api_key.go index 7d30c1aac1..828d5fd6c0 100644 --- a/esapi/api.xpack.security.invalidate_api_key.go +++ b/esapi/api.xpack.security.invalidate_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.invalidate_token.go b/esapi/api.xpack.security.invalidate_token.go index 166ffb84d4..9df4f4d42f 100644 --- a/esapi/api.xpack.security.invalidate_token.go +++ b/esapi/api.xpack.security.invalidate_token.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.oidc_authenticate.go b/esapi/api.xpack.security.oidc_authenticate.go index 3c60956280..c39c9dd4fd 100644 --- a/esapi/api.xpack.security.oidc_authenticate.go +++ b/esapi/api.xpack.security.oidc_authenticate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.oidc_logout.go b/esapi/api.xpack.security.oidc_logout.go index 9e9ea8e5f9..74506154eb 100644 --- a/esapi/api.xpack.security.oidc_logout.go +++ b/esapi/api.xpack.security.oidc_logout.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.oidc_prepare_authentication.go b/esapi/api.xpack.security.oidc_prepare_authentication.go index 2fc9c971b1..8e88a70bd3 100644 --- a/esapi/api.xpack.security.oidc_prepare_authentication.go +++ b/esapi/api.xpack.security.oidc_prepare_authentication.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.put_privileges.go b/esapi/api.xpack.security.put_privileges.go index 85be74ceb9..d5a1c31090 100644 --- a/esapi/api.xpack.security.put_privileges.go +++ b/esapi/api.xpack.security.put_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.put_role.go b/esapi/api.xpack.security.put_role.go index 0b1da344de..e11d36c472 100644 --- a/esapi/api.xpack.security.put_role.go +++ b/esapi/api.xpack.security.put_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.put_role_mapping.go b/esapi/api.xpack.security.put_role_mapping.go index 798367f4ba..309ad361db 100644 --- a/esapi/api.xpack.security.put_role_mapping.go +++ b/esapi/api.xpack.security.put_role_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.put_user.go b/esapi/api.xpack.security.put_user.go index bd2f770c1d..dd9d2a2c97 100644 --- a/esapi/api.xpack.security.put_user.go +++ b/esapi/api.xpack.security.put_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.query_api_keys.go b/esapi/api.xpack.security.query_api_keys.go index 0608a3459d..71fd57ea1e 100644 --- a/esapi/api.xpack.security.query_api_keys.go +++ b/esapi/api.xpack.security.query_api_keys.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.query_role.go b/esapi/api.xpack.security.query_role.go index dbeb9d5f50..1b01207efd 100644 --- a/esapi/api.xpack.security.query_role.go +++ b/esapi/api.xpack.security.query_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.query_user.go b/esapi/api.xpack.security.query_user.go index 4f91b12eeb..3dc204cae7 100644 --- a/esapi/api.xpack.security.query_user.go +++ b/esapi/api.xpack.security.query_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.saml_authenticate.go b/esapi/api.xpack.security.saml_authenticate.go index 39323896e9..fba0dd649b 100644 --- a/esapi/api.xpack.security.saml_authenticate.go +++ b/esapi/api.xpack.security.saml_authenticate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.saml_complete_logout.go b/esapi/api.xpack.security.saml_complete_logout.go index 41e2771016..200191f712 100644 --- a/esapi/api.xpack.security.saml_complete_logout.go +++ b/esapi/api.xpack.security.saml_complete_logout.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.saml_invalidate.go b/esapi/api.xpack.security.saml_invalidate.go index d6ec95b008..ac937ee0d6 100644 --- a/esapi/api.xpack.security.saml_invalidate.go +++ b/esapi/api.xpack.security.saml_invalidate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.saml_logout.go b/esapi/api.xpack.security.saml_logout.go index 4df212a621..ac5886700d 100644 --- a/esapi/api.xpack.security.saml_logout.go +++ b/esapi/api.xpack.security.saml_logout.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.saml_prepare_authentication.go b/esapi/api.xpack.security.saml_prepare_authentication.go index c3eaad279c..44eccd440b 100644 --- a/esapi/api.xpack.security.saml_prepare_authentication.go +++ b/esapi/api.xpack.security.saml_prepare_authentication.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.saml_service_provider_metadata.go b/esapi/api.xpack.security.saml_service_provider_metadata.go index 1027e6c23f..deca59aa8a 100644 --- a/esapi/api.xpack.security.saml_service_provider_metadata.go +++ b/esapi/api.xpack.security.saml_service_provider_metadata.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.suggest_user_profiles.go b/esapi/api.xpack.security.suggest_user_profiles.go index ab8b23821a..a5cea215ae 100644 --- a/esapi/api.xpack.security.suggest_user_profiles.go +++ b/esapi/api.xpack.security.suggest_user_profiles.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.update_api_key.go b/esapi/api.xpack.security.update_api_key.go index fec4bd5730..45667fd795 100644 --- a/esapi/api.xpack.security.update_api_key.go +++ b/esapi/api.xpack.security.update_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.update_cross_cluster_api_key.go b/esapi/api.xpack.security.update_cross_cluster_api_key.go index 6e0e73967f..98871e6131 100644 --- a/esapi/api.xpack.security.update_cross_cluster_api_key.go +++ b/esapi/api.xpack.security.update_cross_cluster_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.update_settings.go b/esapi/api.xpack.security.update_settings.go index 041803e1bd..0a25adbe8c 100644 --- a/esapi/api.xpack.security.update_settings.go +++ b/esapi/api.xpack.security.update_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.security.update_user_profile_data.go b/esapi/api.xpack.security.update_user_profile_data.go index 2fba8f349e..16ee86c817 100644 --- a/esapi/api.xpack.security.update_user_profile_data.go +++ b/esapi/api.xpack.security.update_user_profile_data.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.slm.delete_lifecycle.go b/esapi/api.xpack.slm.delete_lifecycle.go index 7e05d77c86..0878e250ed 100644 --- a/esapi/api.xpack.slm.delete_lifecycle.go +++ b/esapi/api.xpack.slm.delete_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newSlmDeleteLifecycleFunc(t Transport) SlmDeleteLifecycle { @@ -51,6 +52,9 @@ type SlmDeleteLifecycle func(policy_id string, o ...func(*SlmDeleteLifecycleRequ type SlmDeleteLifecycleRequest struct { PolicyID string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -96,6 +100,14 @@ func (r SlmDeleteLifecycleRequest) Do(providedCtx context.Context, transport Tra params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -174,6 +186,20 @@ func (f SlmDeleteLifecycle) WithContext(v context.Context) func(*SlmDeleteLifecy } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmDeleteLifecycle) WithMasterTimeout(v time.Duration) func(*SlmDeleteLifecycleRequest) { + return func(r *SlmDeleteLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SlmDeleteLifecycle) WithTimeout(v time.Duration) func(*SlmDeleteLifecycleRequest) { + return func(r *SlmDeleteLifecycleRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. 
func (f SlmDeleteLifecycle) WithPretty() func(*SlmDeleteLifecycleRequest) { return func(r *SlmDeleteLifecycleRequest) { diff --git a/esapi/api.xpack.slm.execute_lifecycle.go b/esapi/api.xpack.slm.execute_lifecycle.go index d18186a1f5..dc688a2ef2 100644 --- a/esapi/api.xpack.slm.execute_lifecycle.go +++ b/esapi/api.xpack.slm.execute_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newSlmExecuteLifecycleFunc(t Transport) SlmExecuteLifecycle { @@ -51,6 +52,9 @@ type SlmExecuteLifecycle func(policy_id string, o ...func(*SlmExecuteLifecycleRe type SlmExecuteLifecycleRequest struct { PolicyID string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -98,6 +102,14 @@ func (r SlmExecuteLifecycleRequest) Do(providedCtx context.Context, transport Tr params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -176,6 +188,20 @@ func (f SlmExecuteLifecycle) WithContext(v context.Context) func(*SlmExecuteLife } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmExecuteLifecycle) WithMasterTimeout(v time.Duration) func(*SlmExecuteLifecycleRequest) { + return func(r *SlmExecuteLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SlmExecuteLifecycle) WithTimeout(v time.Duration) func(*SlmExecuteLifecycleRequest) { + return func(r *SlmExecuteLifecycleRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. 
func (f SlmExecuteLifecycle) WithPretty() func(*SlmExecuteLifecycleRequest) { return func(r *SlmExecuteLifecycleRequest) { diff --git a/esapi/api.xpack.slm.execute_retention.go b/esapi/api.xpack.slm.execute_retention.go index 3007d49f8a..7a19386858 100644 --- a/esapi/api.xpack.slm.execute_retention.go +++ b/esapi/api.xpack.slm.execute_retention.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newSlmExecuteRetentionFunc(t Transport) SlmExecuteRetention { @@ -49,6 +50,9 @@ type SlmExecuteRetention func(o ...func(*SlmExecuteRetentionRequest)) (*Response // SlmExecuteRetentionRequest configures the Slm Execute Retention API request. type SlmExecuteRetentionRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -86,6 +90,14 @@ func (r SlmExecuteRetentionRequest) Do(providedCtx context.Context, transport Tr params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -164,6 +176,20 @@ func (f SlmExecuteRetention) WithContext(v context.Context) func(*SlmExecuteRete } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmExecuteRetention) WithMasterTimeout(v time.Duration) func(*SlmExecuteRetentionRequest) { + return func(r *SlmExecuteRetentionRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f SlmExecuteRetention) WithTimeout(v time.Duration) func(*SlmExecuteRetentionRequest) { + return func(r *SlmExecuteRetentionRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f SlmExecuteRetention) WithPretty() func(*SlmExecuteRetentionRequest) { return func(r *SlmExecuteRetentionRequest) { diff --git a/esapi/api.xpack.slm.get_lifecycle.go b/esapi/api.xpack.slm.get_lifecycle.go index 842e455fcc..048312421c 100644 --- a/esapi/api.xpack.slm.get_lifecycle.go +++ b/esapi/api.xpack.slm.get_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newSlmGetLifecycleFunc(t Transport) SlmGetLifecycle { @@ -51,6 +52,9 @@ type SlmGetLifecycle func(o ...func(*SlmGetLifecycleRequest)) (*Response, error) type SlmGetLifecycleRequest struct { PolicyID []string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -98,6 +102,14 @@ func (r SlmGetLifecycleRequest) Do(providedCtx context.Context, transport Transp params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -183,6 +195,20 @@ func (f SlmGetLifecycle) WithPolicyID(v ...string) func(*SlmGetLifecycleRequest) } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmGetLifecycle) WithMasterTimeout(v time.Duration) func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f SlmGetLifecycle) WithTimeout(v time.Duration) func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f SlmGetLifecycle) WithPretty() func(*SlmGetLifecycleRequest) { return func(r *SlmGetLifecycleRequest) { diff --git a/esapi/api.xpack.slm.get_stats.go b/esapi/api.xpack.slm.get_stats.go index 962e26afd3..194dfb8708 100644 --- a/esapi/api.xpack.slm.get_stats.go +++ b/esapi/api.xpack.slm.get_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newSlmGetStatsFunc(t Transport) SlmGetStats { @@ -49,6 +50,9 @@ type SlmGetStats func(o ...func(*SlmGetStatsRequest)) (*Response, error) // SlmGetStatsRequest configures the Slm Get Stats API request. type SlmGetStatsRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -86,6 +90,14 @@ func (r SlmGetStatsRequest) Do(providedCtx context.Context, transport Transport) params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -164,6 +176,20 @@ func (f SlmGetStats) WithContext(v context.Context) func(*SlmGetStatsRequest) { } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmGetStats) WithMasterTimeout(v time.Duration) func(*SlmGetStatsRequest) { + return func(r *SlmGetStatsRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f SlmGetStats) WithTimeout(v time.Duration) func(*SlmGetStatsRequest) { + return func(r *SlmGetStatsRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f SlmGetStats) WithPretty() func(*SlmGetStatsRequest) { return func(r *SlmGetStatsRequest) { diff --git a/esapi/api.xpack.slm.get_status.go b/esapi/api.xpack.slm.get_status.go index 7920546515..832ed2739f 100644 --- a/esapi/api.xpack.slm.get_status.go +++ b/esapi/api.xpack.slm.get_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -23,6 +23,7 @@ import ( "context" "net/http" "strings" + "time" ) func newSlmGetStatusFunc(t Transport) SlmGetStatus { @@ -49,6 +50,9 @@ type SlmGetStatus func(o ...func(*SlmGetStatusRequest)) (*Response, error) // SlmGetStatusRequest configures the Slm Get Status API request. type SlmGetStatusRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -86,6 +90,14 @@ func (r SlmGetStatusRequest) Do(providedCtx context.Context, transport Transport params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -164,6 +176,20 @@ func (f SlmGetStatus) WithContext(v context.Context) func(*SlmGetStatusRequest) } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmGetStatus) WithMasterTimeout(v time.Duration) func(*SlmGetStatusRequest) { + return func(r *SlmGetStatusRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f SlmGetStatus) WithTimeout(v time.Duration) func(*SlmGetStatusRequest) { + return func(r *SlmGetStatusRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f SlmGetStatus) WithPretty() func(*SlmGetStatusRequest) { return func(r *SlmGetStatusRequest) { diff --git a/esapi/api.xpack.slm.put_lifecycle.go b/esapi/api.xpack.slm.put_lifecycle.go index 0ed61d6aee..e414dbd645 100644 --- a/esapi/api.xpack.slm.put_lifecycle.go +++ b/esapi/api.xpack.slm.put_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -24,6 +24,7 @@ import ( "io" "net/http" "strings" + "time" ) func newSlmPutLifecycleFunc(t Transport) SlmPutLifecycle { @@ -54,6 +55,9 @@ type SlmPutLifecycleRequest struct { PolicyID string + MasterTimeout time.Duration + Timeout time.Duration + Pretty bool Human bool ErrorTrace bool @@ -99,6 +103,14 @@ func (r SlmPutLifecycleRequest) Do(providedCtx context.Context, transport Transp params = make(map[string]string) + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + if r.Pretty { params["pretty"] = "true" } @@ -191,6 +203,20 @@ func (f SlmPutLifecycle) WithBody(v io.Reader) func(*SlmPutLifecycleRequest) { } } +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmPutLifecycle) WithMasterTimeout(v time.Duration) func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f SlmPutLifecycle) WithTimeout(v time.Duration) func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + r.Timeout = v + } +} + // WithPretty makes the response body pretty-printed. func (f SlmPutLifecycle) WithPretty() func(*SlmPutLifecycleRequest) { return func(r *SlmPutLifecycleRequest) { diff --git a/esapi/api.xpack.slm.start.go b/esapi/api.xpack.slm.start.go index 09152462eb..9e978d0ae6 100644 --- a/esapi/api.xpack.slm.start.go +++ b/esapi/api.xpack.slm.start.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.slm.stop.go b/esapi/api.xpack.slm.stop.go index 3081996977..bfdf711a6d 100644 --- a/esapi/api.xpack.slm.stop.go +++ b/esapi/api.xpack.slm.stop.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.sql.clear_cursor.go b/esapi/api.xpack.sql.clear_cursor.go index b9cde5ee1e..2c35d9d1d5 100644 --- a/esapi/api.xpack.sql.clear_cursor.go +++ b/esapi/api.xpack.sql.clear_cursor.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.sql.delete_async.go b/esapi/api.xpack.sql.delete_async.go index d8ddf80acb..855d88e017 100644 --- a/esapi/api.xpack.sql.delete_async.go +++ b/esapi/api.xpack.sql.delete_async.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.sql.get_async.go b/esapi/api.xpack.sql.get_async.go index c6629044d8..3c7d2b7d99 100644 --- a/esapi/api.xpack.sql.get_async.go +++ b/esapi/api.xpack.sql.get_async.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.sql.get_async_status.go b/esapi/api.xpack.sql.get_async_status.go index afb0d33885..265f46df78 100644 --- a/esapi/api.xpack.sql.get_async_status.go +++ b/esapi/api.xpack.sql.get_async_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.sql.query.go b/esapi/api.xpack.sql.query.go index 3c95c0360c..bd4b7d0123 100644 --- a/esapi/api.xpack.sql.query.go +++ b/esapi/api.xpack.sql.query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.sql.translate.go b/esapi/api.xpack.sql.translate.go index 9370a8704e..5a4fb01aa2 100644 --- a/esapi/api.xpack.sql.translate.go +++ b/esapi/api.xpack.sql.translate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.ssl.certificates.go b/esapi/api.xpack.ssl.certificates.go index 30abe1ded3..32ec51b152 100644 --- a/esapi/api.xpack.ssl.certificates.go +++ b/esapi/api.xpack.ssl.certificates.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.text_structure.find_field_structure.go b/esapi/api.xpack.text_structure.find_field_structure.go index 27c571f3fc..21822fbcbd 100644 --- a/esapi/api.xpack.text_structure.find_field_structure.go +++ b/esapi/api.xpack.text_structure.find_field_structure.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi @@ -28,7 +28,7 @@ import ( ) func newTextStructureFindFieldStructureFunc(t Transport) TextStructureFindFieldStructure { - return func(index string, field string, o ...func(*TextStructureFindFieldStructureRequest)) (*Response, error) { + return func(field string, index string, o ...func(*TextStructureFindFieldStructureRequest)) (*Response, error) { var r = TextStructureFindFieldStructureRequest{Index: index, Field: field} for _, f := range o { f(&r) diff --git a/esapi/api.xpack.text_structure.find_message_structure.go b/esapi/api.xpack.text_structure.find_message_structure.go index 2226a60e3d..05ffed08c4 100644 --- a/esapi/api.xpack.text_structure.find_message_structure.go +++ b/esapi/api.xpack.text_structure.find_message_structure.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.text_structure.find_structure.go b/esapi/api.xpack.text_structure.find_structure.go index 12610d9fd1..6c8748feea 100644 --- a/esapi/api.xpack.text_structure.find_structure.go +++ b/esapi/api.xpack.text_structure.find_structure.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.text_structure.test_grok_pattern.go b/esapi/api.xpack.text_structure.test_grok_pattern.go index 0110dfa0eb..ea1058b27b 100644 --- a/esapi/api.xpack.text_structure.test_grok_pattern.go +++ b/esapi/api.xpack.text_structure.test_grok_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.delete_transform.go b/esapi/api.xpack.transform.delete_transform.go index 57b66632b6..f0c2e8a0ff 100644 --- a/esapi/api.xpack.transform.delete_transform.go +++ b/esapi/api.xpack.transform.delete_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.get_node_stats.go b/esapi/api.xpack.transform.get_node_stats.go index 3e646cfbd3..89a611ac72 100644 --- a/esapi/api.xpack.transform.get_node_stats.go +++ b/esapi/api.xpack.transform.get_node_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.get_transform.go b/esapi/api.xpack.transform.get_transform.go index e37c22f26c..821a7d49ff 100644 --- a/esapi/api.xpack.transform.get_transform.go +++ b/esapi/api.xpack.transform.get_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.get_transform_stats.go b/esapi/api.xpack.transform.get_transform_stats.go index 378ec6c159..3d5d04a68c 100644 --- a/esapi/api.xpack.transform.get_transform_stats.go +++ b/esapi/api.xpack.transform.get_transform_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.preview_transform.go b/esapi/api.xpack.transform.preview_transform.go index d53d6ad745..5c3aa473da 100644 --- a/esapi/api.xpack.transform.preview_transform.go +++ b/esapi/api.xpack.transform.preview_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.put_transform.go b/esapi/api.xpack.transform.put_transform.go index e66b3994fc..1203da4ced 100644 --- a/esapi/api.xpack.transform.put_transform.go +++ b/esapi/api.xpack.transform.put_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.reset_transform.go b/esapi/api.xpack.transform.reset_transform.go index 61db94fe58..409225669c 100644 --- a/esapi/api.xpack.transform.reset_transform.go +++ b/esapi/api.xpack.transform.reset_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.schedule_now_transform.go b/esapi/api.xpack.transform.schedule_now_transform.go index a6a39e47d3..d7ab3b3a74 100644 --- a/esapi/api.xpack.transform.schedule_now_transform.go +++ b/esapi/api.xpack.transform.schedule_now_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.start_transform.go b/esapi/api.xpack.transform.start_transform.go index 441ced3b94..85d1a8d775 100644 --- a/esapi/api.xpack.transform.start_transform.go +++ b/esapi/api.xpack.transform.start_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.stop_transform.go b/esapi/api.xpack.transform.stop_transform.go index 02816a03be..3e9dfcc0bb 100644 --- a/esapi/api.xpack.transform.stop_transform.go +++ b/esapi/api.xpack.transform.stop_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.update_transform.go b/esapi/api.xpack.transform.update_transform.go index 56916b5b46..9dd842c58e 100644 --- a/esapi/api.xpack.transform.update_transform.go +++ b/esapi/api.xpack.transform.update_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.transform.upgrade_transforms.go b/esapi/api.xpack.transform.upgrade_transforms.go index 4a409c7f25..d4346620fa 100644 --- a/esapi/api.xpack.transform.upgrade_transforms.go +++ b/esapi/api.xpack.transform.upgrade_transforms.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.ack_watch.go b/esapi/api.xpack.watcher.ack_watch.go index 5368d8816a..845b48b56c 100644 --- a/esapi/api.xpack.watcher.ack_watch.go +++ b/esapi/api.xpack.watcher.ack_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.activate_watch.go b/esapi/api.xpack.watcher.activate_watch.go index 7895dab8c0..ba602ff735 100644 --- a/esapi/api.xpack.watcher.activate_watch.go +++ b/esapi/api.xpack.watcher.activate_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.deactivate_watch.go b/esapi/api.xpack.watcher.deactivate_watch.go index a2ce74fc75..b85ce9d2e8 100644 --- a/esapi/api.xpack.watcher.deactivate_watch.go +++ b/esapi/api.xpack.watcher.deactivate_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.delete_watch.go b/esapi/api.xpack.watcher.delete_watch.go index 6f51851f7c..d333ef9a5f 100644 --- a/esapi/api.xpack.watcher.delete_watch.go +++ b/esapi/api.xpack.watcher.delete_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.execute_watch.go b/esapi/api.xpack.watcher.execute_watch.go index 13487baab2..8481508986 100644 --- a/esapi/api.xpack.watcher.execute_watch.go +++ b/esapi/api.xpack.watcher.execute_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.get_settings.go b/esapi/api.xpack.watcher.get_settings.go index 5504455e1b..c52fd7bae1 100644 --- a/esapi/api.xpack.watcher.get_settings.go +++ b/esapi/api.xpack.watcher.get_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.get_watch.go b/esapi/api.xpack.watcher.get_watch.go index 0352f2c0e6..6c07689f95 100644 --- a/esapi/api.xpack.watcher.get_watch.go +++ b/esapi/api.xpack.watcher.get_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.put_watch.go b/esapi/api.xpack.watcher.put_watch.go index d4cacf7395..a372aa6c79 100644 --- a/esapi/api.xpack.watcher.put_watch.go +++ b/esapi/api.xpack.watcher.put_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.query_watches.go b/esapi/api.xpack.watcher.query_watches.go index 5808d4e743..3f85f36812 100644 --- a/esapi/api.xpack.watcher.query_watches.go +++ b/esapi/api.xpack.watcher.query_watches.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.start.go b/esapi/api.xpack.watcher.start.go index 277a484c9a..5c9e540137 100644 --- a/esapi/api.xpack.watcher.start.go +++ b/esapi/api.xpack.watcher.start.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.stats.go b/esapi/api.xpack.watcher.stats.go index 6850110ea1..0eea4eb1ca 100644 --- a/esapi/api.xpack.watcher.stats.go +++ b/esapi/api.xpack.watcher.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.stop.go b/esapi/api.xpack.watcher.stop.go index d6ca0dc919..3835b69beb 100644 --- a/esapi/api.xpack.watcher.stop.go +++ b/esapi/api.xpack.watcher.stop.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.watcher.update_settings.go b/esapi/api.xpack.watcher.update_settings.go index c5ec219b2b..98462b847e 100644 --- a/esapi/api.xpack.watcher.update_settings.go +++ b/esapi/api.xpack.watcher.update_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.xpack.info.go b/esapi/api.xpack.xpack.info.go index f44342be90..dbffcf8715 100644 --- a/esapi/api.xpack.xpack.info.go +++ b/esapi/api.xpack.xpack.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/api.xpack.xpack.usage.go b/esapi/api.xpack.xpack.usage.go index f6943cd208..80d45e43f8 100644 --- a/esapi/api.xpack.xpack.usage.go +++ b/esapi/api.xpack.xpack.usage.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.16.0: DO NOT EDIT +// Code generated from specification version 8.18.0: DO NOT EDIT package esapi diff --git a/esapi/test/go.mod b/esapi/test/go.mod index 1239edc610..a6868020ff 100644 --- a/esapi/test/go.mod +++ b/esapi/test/go.mod @@ -7,7 +7,7 @@ toolchain go1.22.5 replace github.com/elastic/go-elasticsearch/v8 => ../../ require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 + github.com/elastic/elastic-transport-go/v8 v8.6.1 github.com/elastic/go-elasticsearch/v8 v8.0.0-20210817150010-57d659deaca7 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.mod b/go.mod index aa45dfaa45..9cbe23195c 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 toolchain go1.21.0 require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 + github.com/elastic/elastic-transport-go/v8 v8.6.1 go.opentelemetry.io/otel/trace v1.28.0 ) diff --git a/go.sum b/go.sum index 5874f0b317..fddb85edcb 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= -github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1 h1:h2jQRqH6eLGiBSN4eZbQnJLtL4bC5b4lfVFRjw2R4e4= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/internal/build/cmd/generate/commands/gensource/model.go b/internal/build/cmd/generate/commands/gensource/model.go index 38e25f75c7..e827f12e12 100644 --- a/internal/build/cmd/generate/commands/gensource/model.go +++ b/internal/build/cmd/generate/commands/gensource/model.go @@ -43,7 +43,6 @@ func init() { } // NewEndpoint creates a new API endpoint. -// func NewEndpoint(f io.Reader) (*Endpoint, error) { var endpoint Endpoint var spec map[string]Endpoint @@ -101,6 +100,20 @@ func NewEndpoint(f io.Reader) (*Endpoint, error) { endpoint.URL.Params = endpoint.Params } + // These are implemented statically. + paramSkipList := map[string]bool{ + "human": true, + "pretty": true, + "error_trace": true, + "filter_path": true, + } + for name, _ := range endpoint.Params { + // remove from endpoint if it's in the skip list + if _, ok := paramSkipList[name]; ok { + delete(endpoint.Params, name) + } + } + if fpath, ok := f.(*os.File); ok { if strings.Contains(fpath.Name(), "x-pack") { endpoint.Type = "xpack" @@ -221,7 +234,6 @@ func NewEndpoint(f io.Reader) (*Endpoint, error) { } // Endpoint represents an API endpoint. -// type Endpoint struct { Name string `json:"-"` Type string `json:"-"` @@ -239,7 +251,6 @@ type Endpoint struct { } // URL represents API endpoint URL. 
-// type URL struct { Endpoint *Endpoint `json:"-"` @@ -268,7 +279,6 @@ type Path struct { } // Part represents part of the API endpoint URL. -// type Part struct { Endpoint *Endpoint `json:"-"` @@ -283,7 +293,6 @@ type Part struct { } // Param represents API endpoint parameter. -// type Param struct { Endpoint *Endpoint `json:"-"` @@ -297,7 +306,6 @@ type Param struct { } // Body represents API endpoint body. -// type Body struct { Endpoint *Endpoint `json:"-"` @@ -307,7 +315,6 @@ type Body struct { } // MethodArgument represents a method argument for API endpoint. -// type MethodArgument struct { Endpoint *Endpoint @@ -320,14 +327,12 @@ type MethodArgument struct { } // Namespace returns the API endpoint namespace. -// func (e *Endpoint) Namespace() string { ep := strings.Split(e.Name, ".") return utils.NameToGo(ep[0]) } // MethodName returns the API endpoint method name. -// func (e *Endpoint) MethodName() string { ep := strings.Split(e.Name, ".") ep = append(ep[:0], ep[1:]...) @@ -344,13 +349,11 @@ func (e *Endpoint) MethodName() string { } // MethodWithNamespace returns the API endpoint method name with namespace. -// func (e *Endpoint) MethodWithNamespace() string { return utils.APIToGo(e.Name) } // HumanMethodWithNamespace returns the API endpoint method name in humanized form. -// func (e *Endpoint) HumanMethodWithNamespace() string { var ( src = e.MethodWithNamespace() @@ -371,7 +374,6 @@ func (e *Endpoint) HumanMethodWithNamespace() string { } // RequiredArguments return the list of required method arguments. -// func (e *Endpoint) RequiredArguments() []MethodArgument { var args = make([]MethodArgument, 0) var prominentArgs = []string{ @@ -468,7 +470,6 @@ func (e *Endpoint) RequiredArguments() []MethodArgument { } // GoName returns a Go name for part. -// func (p *Part) GoName() string { switch { case p.Name == "context": @@ -479,13 +480,11 @@ func (p *Part) GoName() string { } // GoType returns a Go type for part. 
-// func (p *Part) GoType(comment ...bool) string { return utils.TypeToGo(p.Type) } // GoName returns a Go name for parameter. -// func (p *Param) GoName() string { switch { case p.Name == "context": @@ -498,7 +497,6 @@ func (p *Param) GoName() string { } // GoType returns a Go type for parameter. -// func (p *Param) GoType(comment ...bool) string { if f := (&Generator{Endpoint: p.Endpoint}).GetOverride("polymorphic-param", p.Endpoint.Name); f != nil { if out := f(p.Endpoint, p.Name); out != "" { @@ -509,13 +507,11 @@ func (p *Param) GoType(comment ...bool) string { } // GoName returns a Go name for method argument. -// func (p *MethodArgument) GoName() string { return utils.NameToGo(p.Name, p.Endpoint.MethodWithNamespace()) } // GoType returns a Go type for method argument. -// func (p *MethodArgument) GoType(comment ...bool) string { return utils.TypeToGo(p.Type) } diff --git a/internal/build/cmd/generate/commands/gentests/skips.go b/internal/build/cmd/generate/commands/gentests/skips.go index 6766e349e7..d0b6fc6f93 100644 --- a/internal/build/cmd/generate/commands/gentests/skips.go +++ b/internal/build/cmd/generate/commands/gentests/skips.go @@ -67,6 +67,15 @@ var skipFiles = []string{ ".*esql\\/.*.yml", "deprecation/10_basic.yml", // incompatible test generation "search/520_fetch_fields.yml", // disabled for inconsistency + "search.vectors/90_sparse_vector.yml", + "indices.create/21_synthetic_source_stored.yml", + "indices.create/20_synthetic_source.yml", + "indices.recovery/20_synthetic_source.yml", + "ingest_geoip/20_geoip_processor.yml", + "range/20_synthetic_source.yml", + "search/600_flattened_ignore_above.yml", + "search/540_ignore_above_synthetic_source.yml", + "update/100_synthetic_source.yml", } // TODO: Comments into descriptions for `Skip()` @@ -224,6 +233,13 @@ tsdb/40_search.yml: tsdb/70_dimension_types.yml: - flattened field missing routing path field +tsdb/25_id_generation.yml: + - delete over _bulk + +tsdb/90_unsupported_operations.yml: + - 
index with routing over _bulk + - update over _bulk + # Deliberate wrong type doesn't match Go types cluster.desired_nodes/10_basic.yml: - Test version must be a number @@ -464,6 +480,8 @@ nodes.stats/11_indices_metrics.yml: - Metric - blank for indices shards - Metric - _all for indices shards - indices shards total count test + - indices mappings exact count test for indices level + - Lucene segment level fields stats data_stream/10_data_stream_resolvability.yml: - Verify data stream resolvability in ILM remove policy API @@ -528,12 +546,22 @@ get/100_synthetic_source.yml: - non-indexed dense vectors - fields with ignore_malformed - flattened field with ignore_above + - fetch without refresh also produces synthetic source + - doc values keyword with ignore_above + - stored keyword with ignore_above + - flattened field + - flattened field with ignore_above and arrays indices.stats/70_write_load.yml: - Write load average is tracked at shard level search/400_synthetic_source.yml: - stored keyword without sibling fields + - doc values keyword with ignore_above + - stored keyword with ignore_above + +search/140_pre_filter_search_shards.yml: + - pre_filter_shard_size with shards that have no hit health/10_usage.yml: - Usage stats on the health API @@ -560,6 +588,7 @@ logsdb/10_settings.yml: - override sort order settings - override sort missing settings - override sort mode settings + - default ignore dynamic beyond limit and default sorting with hostname # expects map, got nil search/520_fetch_fields.yml: @@ -568,4 +597,40 @@ search/520_fetch_fields.yml: spatial/140_synthetic_source.yml: - point + +analysis-common/40_token_filters.yml: + - stemmer_override file access + +cluster.stats/30_ccs_stats.yml: + - cross-cluster search stats search + +cluster.stats/40_source_modes.yml: + - test source modes + +index/92_metrics_auto_subobjects.yml: + - Metrics object indexing with synthetic source + +index/91_metrics_no_subobjects.yml: + - Metrics object indexing with synthetic 
source + +ingest_geoip/40_geoip_databases.yml: + - Test adding, getting, and removing geoip databases + +ingest_geoip/30_geoip_stats.yml: + - Test geoip stats + +ingest_geoip/60_ip_location_databases.yml: + - Test adding, getting, and removing ip location databases + +ingest_geoip/50_ip_lookup_processor.yml: + - Test ip_location processor with defaults + +logsdb/20_source_mapping.yml: + - synthetic _source is default + +search.suggest/20_phrase.yml: + - breaks ties by sorting terms + +migrate/30_create_from.yml: + - Test create_from with remove_index_blocks default of true ` diff --git a/internal/build/go.mod b/internal/build/go.mod index 067e5ec37b..97a6e5d104 100644 --- a/internal/build/go.mod +++ b/internal/build/go.mod @@ -1,7 +1,6 @@ module github.com/elastic/go-elasticsearch/v8/internal/build -go 1.21 -toolchain go1.21.0 +go 1.22.0 replace github.com/elastic/go-elasticsearch/v8 => ../../ @@ -10,7 +9,7 @@ require ( github.com/elastic/go-elasticsearch/v8 v8.0.0-20210817150010-57d659deaca7 github.com/spf13/cobra v1.8.0 golang.org/x/crypto v0.19.0 - golang.org/x/tools v0.22.0 + golang.org/x/tools v0.29.0 gopkg.in/yaml.v2 v2.4.0 ) @@ -18,8 +17,8 @@ require ( github.com/dlclark/regexp2 v1.4.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/mod v0.18.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.21.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/term v0.17.0 // indirect ) diff --git a/internal/build/go.sum b/internal/build/go.sum index f352adc484..ab37d788ad 100644 --- a/internal/build/go.sum +++ b/internal/build/go.sum @@ -6,6 +6,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dlclark/regexp2 v1.4.0 
h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E= github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -16,21 +18,20 @@ github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyh github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod 
v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= diff --git a/internal/testing/go.mod b/internal/testing/go.mod index 27b491d696..5b4c19a0ce 100644 --- a/internal/testing/go.mod +++ b/internal/testing/go.mod @@ -1,28 +1,28 @@ module testing -go 1.21 -toolchain go1.21.0 +go 1.22.0 replace github.com/elastic/go-elasticsearch/v8 => ./../.. 
require ( - github.com/elastic/elastic-transport-go/v8 v8.6.0 + github.com/elastic/elastic-transport-go/v8 v8.6.1 github.com/elastic/go-elasticsearch/v8 v8.14.0 - github.com/testcontainers/testcontainers-go v0.31.0 - github.com/testcontainers/testcontainers-go/modules/elasticsearch v0.31.0 + github.com/testcontainers/testcontainers-go v0.34.0 + github.com/testcontainers/testcontainers-go/modules/elasticsearch v0.34.0 ) require ( dario.cat/mergo v1.0.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/Microsoft/hcsshim v0.11.4 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/containerd/containerd v1.7.15 // indirect + github.com/containerd/containerd v1.7.18 // indirect github.com/containerd/log v0.1.0 // indirect - github.com/cpuguy83/dockercfg v0.3.1 // indirect - github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cpuguy83/dockercfg v0.3.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/docker v27.1.1+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -30,11 +30,12 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/klauspost/compress v1.16.0 // indirect + github.com/klauspost/compress v1.17.4 // indirect + github.com/kr/text v0.2.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magiconair/properties v1.8.7 // 
indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect @@ -43,10 +44,12 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/sirupsen/logrus v1.9.3 // indirect + github.com/stretchr/testify v1.9.0 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect @@ -54,11 +57,8 @@ require ( go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect - golang.org/x/crypto v0.23.0 // indirect + golang.org/x/crypto v0.24.0 // indirect golang.org/x/mod v0.16.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/tools v0.13.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d // indirect - google.golang.org/grpc v1.58.3 // indirect - google.golang.org/protobuf v1.33.0 // indirect + golang.org/x/sys v0.21.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/internal/testing/go.sum b/internal/testing/go.sum index 1fce32d1f0..57d202819f 100644 --- a/internal/testing/go.sum +++ b/internal/testing/go.sum @@ -4,33 +4,34 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= 
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= -github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/containerd/containerd v1.7.15 h1:afEHXdil9iAm03BmhjzKyXnnEBtjaLJefdU7DV0IFes= -github.com/containerd/containerd v1.7.15/go.mod h1:ISzRRTMF8EXNpJlTzyr2XMhN+j9K302C21/+cr3kUnY= +github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao= +github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= -github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= +github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= -github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/elastic-transport-go/v8 v8.6.1 h1:h2jQRqH6eLGiBSN4eZbQnJLtL4bC5b4lfVFRjw2R4e4= +github.com/elastic/elastic-transport-go/v8 v8.6.1/go.mod 
h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -42,8 +43,6 @@ github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -54,12 +53,18 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rH github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= -github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= @@ -80,6 +85,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= @@ -91,16 +98,18 @@ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs 
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/testcontainers/testcontainers-go v0.31.0 h1:W0VwIhcEVhRflwL9as3dhY6jXjVCA27AkmbnZ+UTh3U= -github.com/testcontainers/testcontainers-go v0.31.0/go.mod h1:D2lAoA0zUFiSY+eAflqK5mcUx/A5hrrORaEQrd0SefI= -github.com/testcontainers/testcontainers-go/modules/elasticsearch v0.31.0 h1:KBbU/rVL3RhrFYcrVGY+NDw3x3Ho2YlJUnjbIOsL6jk= -github.com/testcontainers/testcontainers-go/modules/elasticsearch v0.31.0/go.mod h1:7YQbgJUoNDztnXWAdCRtI+gUqBM+URd83JzwYlzwGhQ= +github.com/testcontainers/testcontainers-go v0.34.0 h1:5fbgF0vIN5u+nD3IWabQwRybuB4GY8G2HHgCkbMzMHo= +github.com/testcontainers/testcontainers-go v0.34.0/go.mod h1:6P/kMkQe8yqPHfPWNulFGdFHTD8HB2vLq/231xY2iPQ= +github.com/testcontainers/testcontainers-go/modules/elasticsearch v0.34.0 h1:BBwJUs9xBpt1uOfO+yAr2pYW75MsyzuO/o70HTPnhe4= +github.com/testcontainers/testcontainers-go/modules/elasticsearch v0.34.0/go.mod h1:OqhRGYR+5VG0Dw506F6Ho9I4YG1kB+o9uPTKC0uPUA8= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= 
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= @@ -128,8 +137,8 @@ go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v8 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= @@ -138,13 +147,11 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -155,38 +162,38 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod 
h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= -google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= -google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d h1:pgIUhmqwKOUlnKna4r6amKdUngdL8DrkpFeV8+VBElY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= -google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= -google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13 h1:vlzZttNJGVqTsRFU9AmdnrcO1Znh8Ew9kCD//yjigk0= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= +google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.0 
h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= -gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= diff --git a/internal/version/version.go b/internal/version/version.go index 1adc3470fd..7face89b44 100644 --- a/internal/version/version.go +++ b/internal/version/version.go @@ -18,4 +18,4 @@ package version // Client returns the client version as a string. -const Client = "8.16.0-SNAPSHOT" +const Client = "8.18.0-SNAPSHOT" diff --git a/typedapi/api._.go b/typedapi/api._.go index b143d085de..6af2ff878c 100644 --- a/typedapi/api._.go +++ b/typedapi/api._.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package typedapi @@ -95,14 +95,19 @@ import ( connector_put "github.com/elastic/go-elasticsearch/v8/typedapi/connector/put" connector_secret_post "github.com/elastic/go-elasticsearch/v8/typedapi/connector/secretpost" connector_sync_job_cancel "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobcancel" + connector_sync_job_check_in "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobcheckin" + connector_sync_job_claim "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobclaim" connector_sync_job_delete "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobdelete" + connector_sync_job_error "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjoberror" connector_sync_job_get "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobget" connector_sync_job_list "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjoblist" connector_sync_job_post 
"github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobpost" + connector_sync_job_update_stats "github.com/elastic/go-elasticsearch/v8/typedapi/connector/syncjobupdatestats" connector_update_active_filtering "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updateactivefiltering" connector_update_api_key_id "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updateapikeyid" connector_update_configuration "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updateconfiguration" connector_update_error "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updateerror" + connector_update_features "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updatefeatures" connector_update_filtering "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updatefiltering" connector_update_filtering_validation "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updatefilteringvalidation" connector_update_index_name "github.com/elastic/go-elasticsearch/v8/typedapi/connector/updateindexname" @@ -169,6 +174,9 @@ import ( eql_get_status "github.com/elastic/go-elasticsearch/v8/typedapi/eql/getstatus" eql_search "github.com/elastic/go-elasticsearch/v8/typedapi/eql/search" esql_async_query "github.com/elastic/go-elasticsearch/v8/typedapi/esql/asyncquery" + esql_async_query_delete "github.com/elastic/go-elasticsearch/v8/typedapi/esql/asyncquerydelete" + esql_async_query_get "github.com/elastic/go-elasticsearch/v8/typedapi/esql/asyncqueryget" + esql_async_query_stop "github.com/elastic/go-elasticsearch/v8/typedapi/esql/asyncquerystop" esql_query "github.com/elastic/go-elasticsearch/v8/typedapi/esql/query" features_get_features "github.com/elastic/go-elasticsearch/v8/typedapi/features/getfeatures" features_reset_features "github.com/elastic/go-elasticsearch/v8/typedapi/features/resetfeatures" @@ -190,11 +198,13 @@ import ( ilm_stop "github.com/elastic/go-elasticsearch/v8/typedapi/ilm/stop" indices_add_block 
"github.com/elastic/go-elasticsearch/v8/typedapi/indices/addblock" indices_analyze "github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze" + indices_cancel_migrate_reindex "github.com/elastic/go-elasticsearch/v8/typedapi/indices/cancelmigratereindex" indices_clear_cache "github.com/elastic/go-elasticsearch/v8/typedapi/indices/clearcache" indices_clone "github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone" indices_close "github.com/elastic/go-elasticsearch/v8/typedapi/indices/close" indices_create "github.com/elastic/go-elasticsearch/v8/typedapi/indices/create" indices_create_data_stream "github.com/elastic/go-elasticsearch/v8/typedapi/indices/createdatastream" + indices_create_from "github.com/elastic/go-elasticsearch/v8/typedapi/indices/createfrom" indices_data_streams_stats "github.com/elastic/go-elasticsearch/v8/typedapi/indices/datastreamsstats" indices_delete "github.com/elastic/go-elasticsearch/v8/typedapi/indices/delete" indices_delete_alias "github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletealias" @@ -215,12 +225,15 @@ import ( indices_get "github.com/elastic/go-elasticsearch/v8/typedapi/indices/get" indices_get_alias "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getalias" indices_get_data_lifecycle "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatalifecycle" + indices_get_data_lifecycle_stats "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatalifecyclestats" indices_get_data_stream "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatastream" indices_get_field_mapping "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getfieldmapping" indices_get_index_template "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getindextemplate" indices_get_mapping "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getmapping" + indices_get_migrate_reindex_status "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getmigratereindexstatus" indices_get_settings 
"github.com/elastic/go-elasticsearch/v8/typedapi/indices/getsettings" indices_get_template "github.com/elastic/go-elasticsearch/v8/typedapi/indices/gettemplate" + indices_migrate_reindex "github.com/elastic/go-elasticsearch/v8/typedapi/indices/migratereindex" indices_migrate_to_data_stream "github.com/elastic/go-elasticsearch/v8/typedapi/indices/migratetodatastream" indices_modify_data_stream "github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream" indices_open "github.com/elastic/go-elasticsearch/v8/typedapi/indices/open" @@ -247,14 +260,28 @@ import ( indices_unfreeze "github.com/elastic/go-elasticsearch/v8/typedapi/indices/unfreeze" indices_update_aliases "github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases" indices_validate_query "github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery" + inference_chat_completion_unified "github.com/elastic/go-elasticsearch/v8/typedapi/inference/chatcompletionunified" + inference_completion "github.com/elastic/go-elasticsearch/v8/typedapi/inference/completion" inference_delete "github.com/elastic/go-elasticsearch/v8/typedapi/inference/delete" inference_get "github.com/elastic/go-elasticsearch/v8/typedapi/inference/get" - inference_inference "github.com/elastic/go-elasticsearch/v8/typedapi/inference/inference" inference_put "github.com/elastic/go-elasticsearch/v8/typedapi/inference/put" + inference_put_openai "github.com/elastic/go-elasticsearch/v8/typedapi/inference/putopenai" + inference_put_watsonx "github.com/elastic/go-elasticsearch/v8/typedapi/inference/putwatsonx" + inference_rerank "github.com/elastic/go-elasticsearch/v8/typedapi/inference/rerank" + inference_sparse_embedding "github.com/elastic/go-elasticsearch/v8/typedapi/inference/sparseembedding" + inference_stream_completion "github.com/elastic/go-elasticsearch/v8/typedapi/inference/streamcompletion" + inference_text_embedding "github.com/elastic/go-elasticsearch/v8/typedapi/inference/textembedding" + 
inference_update "github.com/elastic/go-elasticsearch/v8/typedapi/inference/update" + ingest_delete_geoip_database "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deletegeoipdatabase" + ingest_delete_ip_location_database "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deleteiplocationdatabase" ingest_delete_pipeline "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deletepipeline" ingest_geo_ip_stats "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/geoipstats" + ingest_get_geoip_database "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getgeoipdatabase" + ingest_get_ip_location_database "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getiplocationdatabase" ingest_get_pipeline "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getpipeline" ingest_processor_grok "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/processorgrok" + ingest_put_geoip_database "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putgeoipdatabase" + ingest_put_ip_location_database "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putiplocationdatabase" ingest_put_pipeline "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline" ingest_simulate "github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate" license_delete "github.com/elastic/go-elasticsearch/v8/typedapi/license/delete" @@ -362,6 +389,7 @@ import ( query_rules_list_rulesets "github.com/elastic/go-elasticsearch/v8/typedapi/queryrules/listrulesets" query_rules_put_rule "github.com/elastic/go-elasticsearch/v8/typedapi/queryrules/putrule" query_rules_put_ruleset "github.com/elastic/go-elasticsearch/v8/typedapi/queryrules/putruleset" + query_rules_test "github.com/elastic/go-elasticsearch/v8/typedapi/queryrules/test" rollup_delete_job "github.com/elastic/go-elasticsearch/v8/typedapi/rollup/deletejob" rollup_get_jobs "github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getjobs" rollup_get_rollup_caps 
"github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupcaps" @@ -379,8 +407,10 @@ import ( search_application_get "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/get" search_application_get_behavioral_analytics "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/getbehavioralanalytics" search_application_list "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/list" + search_application_post_behavioral_analytics_event "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/postbehavioralanalyticsevent" search_application_put "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/put" search_application_put_behavioral_analytics "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/putbehavioralanalytics" + search_application_render_query "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/renderquery" search_application_search "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/search" security_activate_user_profile "github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile" security_authenticate "github.com/elastic/go-elasticsearch/v8/typedapi/security/authenticate" @@ -396,6 +426,7 @@ import ( security_create_api_key "github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey" security_create_cross_cluster_api_key "github.com/elastic/go-elasticsearch/v8/typedapi/security/createcrossclusterapikey" security_create_service_token "github.com/elastic/go-elasticsearch/v8/typedapi/security/createservicetoken" + security_delegate_pki "github.com/elastic/go-elasticsearch/v8/typedapi/security/delegatepki" security_delete_privileges "github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteprivileges" security_delete_role "github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterole" security_delete_role_mapping "github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterolemapping" @@ -442,11 +473,13 @@ 
import ( security_saml_service_provider_metadata "github.com/elastic/go-elasticsearch/v8/typedapi/security/samlserviceprovidermetadata" security_suggest_user_profiles "github.com/elastic/go-elasticsearch/v8/typedapi/security/suggestuserprofiles" security_update_api_key "github.com/elastic/go-elasticsearch/v8/typedapi/security/updateapikey" + security_update_cross_cluster_api_key "github.com/elastic/go-elasticsearch/v8/typedapi/security/updatecrossclusterapikey" security_update_settings "github.com/elastic/go-elasticsearch/v8/typedapi/security/updatesettings" security_update_user_profile_data "github.com/elastic/go-elasticsearch/v8/typedapi/security/updateuserprofiledata" shutdown_delete_node "github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/deletenode" shutdown_get_node "github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/getnode" shutdown_put_node "github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode" + simulate_ingest "github.com/elastic/go-elasticsearch/v8/typedapi/simulate/ingest" slm_delete_lifecycle "github.com/elastic/go-elasticsearch/v8/typedapi/slm/deletelifecycle" slm_execute_lifecycle "github.com/elastic/go-elasticsearch/v8/typedapi/slm/executelifecycle" slm_execute_retention "github.com/elastic/go-elasticsearch/v8/typedapi/slm/executeretention" @@ -464,6 +497,8 @@ import ( snapshot_delete_repository "github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/deleterepository" snapshot_get "github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/get" snapshot_get_repository "github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/getrepository" + snapshot_repository_analyze "github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/repositoryanalyze" + snapshot_repository_verify_integrity "github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/repositoryverifyintegrity" snapshot_restore "github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore" snapshot_status 
"github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/status" snapshot_verify_repository "github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/verifyrepository" @@ -518,8 +553,9 @@ import ( ) type AsyncSearch struct { - // Deletes an async search by identifier. - // If the search is still running, the search request will be cancelled. + // Delete an async search. + // + // If the asynchronous search is still running, it is cancelled. // Otherwise, the saved search results are deleted. // If the Elasticsearch security features are enabled, the deletion of a // specific async search is restricted to: the authenticated user that submitted @@ -527,29 +563,36 @@ type AsyncSearch struct { // privilege. // https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html Delete async_search_delete.NewDelete - // Retrieves the results of a previously submitted async search request given - // its identifier. + // Get async search results. + // + // Retrieve the results of a previously submitted asynchronous search request. // If the Elasticsearch security features are enabled, access to the results of // a specific async search is restricted to the user or API key that submitted // it. // https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html Get async_search_get.NewGet - // Get async search status - // Retrieves the status of a previously submitted async search request given its + // Get the async search status. + // + // Get the status of a previously submitted async search request given its // identifier, without retrieving search results. - // If the Elasticsearch security features are enabled, use of this API is - // restricted to the `monitoring_user` role. + // If the Elasticsearch security features are enabled, the access to the status + // of a specific async search is restricted to: + // + // * The user or API key that submitted the original async search request. 
+ // * Users that have the `monitor` cluster privilege or greater privileges. // https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html Status async_search_status.NewStatus - // Runs a search request asynchronously. + // Run an async search. + // // When the primary sort of the results is an indexed field, shards get sorted - // based on minimum and maximum value that they hold for that field, hence - // partial results become available following the sort criteria that was - // requested. - // Warning: Async search does not support scroll nor search requests that only - // include the suggest section. - // By default, Elasticsearch doesn’t allow you to store an async search response - // larger than 10Mb and an attempt to do this results in an error. + // based on minimum and maximum value that they hold for that field. Partial + // results become available following the sort criteria that was requested. + // + // Warning: Asynchronous search does not support scroll or search requests that + // include only the suggest section. + // + // By default, Elasticsearch does not allow you to store an async search + // response larger than 10Mb and an attempt to do this results in an error. // The maximum allowed size for a stored async search response can be set by // changing the `search.max_async_search_response_size` cluster level setting. // https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html @@ -557,21 +600,50 @@ type AsyncSearch struct { } type Autoscaling struct { - // Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. - // Direct use is not supported. + // Delete an autoscaling policy. + // + // NOTE: This feature is designed for indirect use by Elasticsearch Service, + // Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + // supported. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-delete-autoscaling-policy.html DeleteAutoscalingPolicy autoscaling_delete_autoscaling_policy.NewDeleteAutoscalingPolicy - // Gets the current autoscaling capacity based on the configured autoscaling - // policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not + // Get the autoscaling capacity. + // + // NOTE: This feature is designed for indirect use by Elasticsearch Service, + // Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. + // + // This API gets the current autoscaling capacity based on the configured + // autoscaling policy. + // It will return information to size the cluster appropriately to the current + // workload. + // + // The `required_capacity` is calculated as the maximum of the + // `required_capacity` result of all individual deciders that are enabled for + // the policy. + // + // The operator should verify that the `current_nodes` match the operator’s + // knowledge of the cluster to avoid making autoscaling decisions based on stale + // or incomplete information. + // + // The response contains decider-specific information you can use to diagnose + // how and why autoscaling determined a certain capacity was required. + // This information is provided for diagnosis only. + // Do not use this information to make autoscaling decisions. // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-capacity.html GetAutoscalingCapacity autoscaling_get_autoscaling_capacity.NewGetAutoscalingCapacity - // Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and - // ECK. Direct use is not supported. + // Get an autoscaling policy. + // + // NOTE: This feature is designed for indirect use by Elasticsearch Service, + // Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + // supported. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-capacity.html GetAutoscalingPolicy autoscaling_get_autoscaling_policy.NewGetAutoscalingPolicy - // Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and - // ECK. Direct use is not supported. + // Create or update an autoscaling policy. + // + // NOTE: This feature is designed for indirect use by Elasticsearch Service, + // Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + // supported. // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-put-autoscaling-policy.html PutAutoscalingPolicy autoscaling_put_autoscaling_policy.NewPutAutoscalingPolicy } @@ -585,54 +657,63 @@ type Capabilities struct { type Cat struct { // Get aliases. - // Retrieves the cluster’s index aliases, including filter and routing - // information. - // The API does not return data stream aliases. // - // CAT APIs are only intended for human consumption using the command line or - // the Kibana console. They are not intended for use by applications. For - // application consumption, use the aliases API. + // Get the cluster's index aliases, including filter and routing information. + // This API does not return data stream aliases. + // + // IMPORTANT: CAT APIs are only intended for human consumption using the command + // line or the Kibana console. They are not intended for use by applications. + // For application consumption, use the aliases API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-alias.html Aliases cat_aliases.NewAliases - // Provides a snapshot of the number of shards allocated to each data node and - // their disk space. - // IMPORTANT: cat APIs are only intended for human consumption using the command + // Get shard allocation information. + // + // Get a snapshot of the number of shards allocated to each data node and their + // disk space. 
+ // + // IMPORTANT: CAT APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-allocation.html Allocation cat_allocation.NewAllocation // Get component templates. - // Returns information about component templates in a cluster. + // + // Get information about component templates in a cluster. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // - // CAT APIs are only intended for human consumption using the command line or - // Kibana console. + // IMPORTANT: CAT APIs are only intended for human consumption using the command + // line or Kibana console. // They are not intended for use by applications. For application consumption, // use the get component template API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-component-templates.html ComponentTemplates cat_component_templates.NewComponentTemplates // Get a document count. - // Provides quick access to a document count for a data stream, an index, or an + // + // Get quick access to a document count for a data stream, an index, or an // entire cluster. // The document count only includes live documents, not deleted documents which // have not yet been removed by the merge process. // - // CAT APIs are only intended for human consumption using the command line or - // Kibana console. + // IMPORTANT: CAT APIs are only intended for human consumption using the command + // line or Kibana console. // They are not intended for use by applications. For application consumption, // use the count API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-count.html Count cat_count.NewCount - // Returns the amount of heap memory currently used by the field data cache on - // every data node in the cluster. + // Get field data cache information. 
+ // + // Get the amount of heap memory currently used by the field data cache on every + // data node in the cluster. + // // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. // They are not intended for use by applications. For application consumption, // use the nodes stats API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-fielddata.html Fielddata cat_fielddata.NewFielddata - // Returns the health status of a cluster, similar to the cluster health API. - // IMPORTANT: cat APIs are only intended for human consumption using the command + // Get the cluster health status. + // + // IMPORTANT: CAT APIs are only intended for human consumption using the command // line or Kibana console. // They are not intended for use by applications. For application consumption, // use the cluster health API. @@ -649,11 +730,13 @@ type Cat struct { // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-health.html Health cat_health.NewHealth // Get CAT help. - // Returns help for the CAT APIs. + // + // Get help for the CAT APIs. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat.html Help cat_help.NewHelp // Get index information. - // Returns high-level information about indices in a cluster, including backing + // + // Get high-level information about indices in a cluster, including backing // indices for data streams. // // Use this request to get the following information for each index in a @@ -676,79 +759,96 @@ type Cat struct { // use an index endpoint. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html Indices cat_indices.NewIndices - // Returns information about the master node, including the ID, bound IP - // address, and name. + // Get master node information. + // + // Get information about the master node, including the ID, bound IP address, + // and name. 
+ // // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-master.html Master cat_master.NewMaster // Get data frame analytics jobs. - // Returns configuration and usage information about data frame analytics jobs. // - // CAT APIs are only intended for human consumption using the Kibana + // Get configuration and usage information about data frame analytics jobs. + // + // IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get data frame analytics jobs statistics // API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-dfanalytics.html MlDataFrameAnalytics cat_ml_data_frame_analytics.NewMlDataFrameAnalytics // Get datafeeds. - // Returns configuration and usage information about datafeeds. + // + // Get configuration and usage information about datafeeds. // This API returns a maximum of 10,000 datafeeds. // If the Elasticsearch security features are enabled, you must have // `monitor_ml`, `monitor`, `manage_ml`, or `manage` // cluster privileges to use this API. // - // CAT APIs are only intended for human consumption using the Kibana + // IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get datafeed statistics API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-datafeeds.html MlDatafeeds cat_ml_datafeeds.NewMlDatafeeds // Get anomaly detection jobs. - // Returns configuration and usage information for anomaly detection jobs. + // + // Get configuration and usage information for anomaly detection jobs. 
// This API returns a maximum of 10,000 jobs. // If the Elasticsearch security features are enabled, you must have // `monitor_ml`, // `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. // - // CAT APIs are only intended for human consumption using the Kibana + // IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get anomaly detection job statistics API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-anomaly-detectors.html MlJobs cat_ml_jobs.NewMlJobs // Get trained models. - // Returns configuration and usage information about inference trained models. // - // CAT APIs are only intended for human consumption using the Kibana + // Get configuration and usage information about inference trained models. + // + // IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get trained models statistics API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-trained-model.html MlTrainedModels cat_ml_trained_models.NewMlTrainedModels - // Returns information about custom node attributes. + // Get node attribute information. + // + // Get information about custom node attributes. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodeattrs.html Nodeattrs cat_nodeattrs.NewNodeattrs - // Returns information about the nodes in a cluster. + // Get node information. + // + // Get information about the nodes in a cluster. 
// IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodes.html Nodes cat_nodes.NewNodes - // Returns cluster-level changes that have not yet been executed. + // Get pending task information. + // + // Get information about cluster-level changes that have not yet taken effect. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the pending cluster tasks API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-pending-tasks.html PendingTasks cat_pending_tasks.NewPendingTasks - // Returns a list of plugins running on each node of a cluster. + // Get plugin information. + // + // Get a list of plugins running on each node of a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-plugins.html Plugins cat_plugins.NewPlugins - // Returns information about ongoing and completed shard recoveries. + // Get shard recovery information. + // + // Get information about ongoing and completed shard recoveries. // Shard recovery is the process of initializing a shard copy, such as restoring // a primary shard from a snapshot or syncing a replica shard from a primary // shard. When a shard recovery completes, the recovered shard is available for @@ -760,39 +860,51 @@ type Cat struct { // application consumption, use the index recovery API. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-recovery.html Recovery cat_recovery.NewRecovery - // Returns the snapshot repositories for a cluster. + // Get snapshot repository information. + // + // Get a list of snapshot repositories for a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the get snapshot repository API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-repositories.html Repositories cat_repositories.NewRepositories - // Returns low-level information about the Lucene segments in index shards. + // Get segment information. + // + // Get low-level information about the Lucene segments in index shards. // For data streams, the API returns information about the backing indices. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the index segments API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-segments.html Segments cat_segments.NewSegments - // Returns information about the shards in a cluster. + // Get shard information. + // + // Get information about the shards in a cluster. // For data streams, the API returns information about the backing indices. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-shards.html Shards cat_shards.NewShards - // Returns information about the snapshots stored in one or more repositories. + // Get snapshot information. + // + // Get information about the snapshots stored in one or more repositories. // A snapshot is a backup of an index or running Elasticsearch cluster. 
// IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the get snapshot API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-snapshots.html Snapshots cat_snapshots.NewSnapshots - // Returns information about tasks currently executing in the cluster. + // Get task information. + // + // Get information about tasks currently running in the cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the task management API. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-tasks.html Tasks cat_tasks.NewTasks - // Returns information about index templates in a cluster. + // Get index template information. + // + // Get information about the index templates in a cluster. // You can use index templates to apply index settings and field mappings to new // indices at creation. // IMPORTANT: cat APIs are only intended for human consumption using the command @@ -800,7 +912,9 @@ type Cat struct { // application consumption, use the get index template API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-templates.html Templates cat_templates.NewTemplates - // Returns thread pool statistics for each node in a cluster. + // Get thread pool statistics. + // + // Get thread pool statistics for each node in a cluster. // Returned information includes all built-in thread pools and custom thread // pools. // IMPORTANT: cat APIs are only intended for human consumption using the command @@ -808,8 +922,9 @@ type Cat struct { // application consumption, use the nodes info API. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-thread-pool.html ThreadPool cat_thread_pool.NewThreadPool - // Get transforms. - // Returns configuration and usage information about transforms. + // Get transform information. + // + // Get configuration and usage information about transforms. // // CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For @@ -819,66 +934,163 @@ type Cat struct { } type Ccr struct { - // Deletes auto-follow patterns. + // Delete auto-follow patterns. + // + // Delete a collection of cross-cluster replication auto-follow patterns. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-delete-auto-follow-pattern.html DeleteAutoFollowPattern ccr_delete_auto_follow_pattern.NewDeleteAutoFollowPattern - // Creates a new follower index configured to follow the referenced leader - // index. + // Create a follower. + // Create a cross-cluster replication follower index that follows a specific + // leader index. + // When the API returns, the follower index exists and cross-cluster replication + // starts replicating operations from the leader index to the follower index. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-follow.html Follow ccr_follow.NewFollow - // Retrieves information about all follower indices, including parameters and - // status for each follower index + // Get follower information. + // + // Get information about all cross-cluster replication follower indices. + // For example, the results include follower index names, leader index names, + // replication options, and whether the follower indices are active or paused. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-info.html FollowInfo ccr_follow_info.NewFollowInfo - // Retrieves follower stats. 
return shard-level stats about the following tasks - // associated with each shard for the specified indices. + // Get follower stats. + // + // Get cross-cluster replication follower stats. + // The API returns shard-level stats about the "following tasks" associated with + // each shard for the specified indices. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-stats.html FollowStats ccr_follow_stats.NewFollowStats - // Removes the follower retention leases from the leader. + // Forget a follower. + // Remove the cross-cluster replication follower retention leases from the + // leader. + // + // A following index takes out retention leases on its leader index. + // These leases are used to increase the likelihood that the shards of the + // leader index retain the history of operations that the shards of the + // following index need to run replication. + // When a follower index is converted to a regular index by the unfollow API + // (either by directly calling the API or by index lifecycle management tasks), + // these leases are removed. + // However, removal of the leases can fail, for example when the remote cluster + // containing the leader index is unavailable. + // While the leases will eventually expire on their own, their extended + // existence can cause the leader index to hold more history than necessary and + // prevent index lifecycle management from performing some operations on the + // leader index. + // This API exists to enable manually removing the leases when the unfollow API + // is unable to do so. + // + // NOTE: This API does not stop replication by a following index. If you use + // this API with a follower index that is still actively following, the + // following index will add back retention leases on the leader. + // The only purpose of this API is to handle the case of failure to remove the + // following retention leases after the unfollow API is invoked. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-forget-follower.html ForgetFollower ccr_forget_follower.NewForgetFollower - // Gets configured auto-follow patterns. Returns the specified auto-follow - // pattern collection. + // Get auto-follow patterns. + // + // Get cross-cluster replication auto-follow patterns. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-auto-follow-pattern.html GetAutoFollowPattern ccr_get_auto_follow_pattern.NewGetAutoFollowPattern - // Pauses an auto-follow pattern + // Pause an auto-follow pattern. + // + // Pause a cross-cluster replication auto-follow pattern. + // When the API returns, the auto-follow pattern is inactive. + // New indices that are created on the remote cluster and match the auto-follow + // patterns are ignored. + // + // You can resume auto-following with the resume auto-follow pattern API. + // When it resumes, the auto-follow pattern is active again and automatically + // configures follower indices for newly created indices on the remote cluster + // that match its patterns. + // Remote indices that were created while the pattern was paused will also be + // followed, unless they have been deleted or closed in the interim. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-pause-auto-follow-pattern.html PauseAutoFollowPattern ccr_pause_auto_follow_pattern.NewPauseAutoFollowPattern - // Pauses a follower index. The follower index will not fetch any additional - // operations from the leader index. + // Pause a follower. + // + // Pause a cross-cluster replication follower index. + // The follower index will not fetch any additional operations from the leader + // index. + // You can resume following with the resume follower API. + // You can pause and resume a follower index to change the configuration of the + // following task. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-pause-follow.html PauseFollow ccr_pause_follow.NewPauseFollow - // Creates a new named collection of auto-follow patterns against a specified - // remote cluster. Newly created indices on the remote cluster matching any of - // the specified patterns will be automatically configured as follower indices. + // Create or update auto-follow patterns. + // Create a collection of cross-cluster replication auto-follow patterns for a + // remote cluster. + // Newly created indices on the remote cluster that match any of the patterns + // are automatically configured as follower indices. + // Indices on the remote cluster that were created before the auto-follow + // pattern was created will not be auto-followed even if they match the pattern. + // + // This API can also be used to update auto-follow patterns. + // NOTE: Follower indices that were configured automatically before updating an + // auto-follow pattern will remain unchanged even if they do not match against + // the new patterns. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-auto-follow-pattern.html PutAutoFollowPattern ccr_put_auto_follow_pattern.NewPutAutoFollowPattern - // Resumes an auto-follow pattern that has been paused + // Resume an auto-follow pattern. + // + // Resume a cross-cluster replication auto-follow pattern that was paused. + // The auto-follow pattern will resume configuring following indices for newly + // created indices that match its patterns on the remote cluster. + // Remote indices created while the pattern was paused will also be followed + // unless they have been deleted or closed in the interim. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-resume-auto-follow-pattern.html ResumeAutoFollowPattern ccr_resume_auto_follow_pattern.NewResumeAutoFollowPattern - // Resumes a follower index that has been paused + // Resume a follower. 
+ // Resume a cross-cluster replication follower index that was paused. + // The follower index could have been paused with the pause follower API. + // Alternatively it could be paused due to replication that cannot be retried + // due to failures during following tasks. + // When this API returns, the follower index will resume fetching operations + // from the leader index. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html ResumeFollow ccr_resume_follow.NewResumeFollow - // Gets all stats related to cross-cluster replication. + // Get cross-cluster replication stats. + // + // This API returns stats about auto-following and the same shard-level stats as + // the get follower stats API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-stats.html Stats ccr_stats.NewStats - // Stops the following task associated with a follower index and removes index - // metadata and settings associated with cross-cluster replication. + // Unfollow an index. + // + // Convert a cross-cluster replication follower index to a regular index. + // The API stops the following task associated with a follower index and removes + // index metadata and settings associated with cross-cluster replication. + // The follower index must be paused and closed before you call the unfollow + // API. + // + // > info + // > Currently cross-cluster replication does not support converting an existing + // regular index to a follower index. Converting a follower index to a regular + // index is an irreversible operation. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-unfollow.html Unfollow ccr_unfollow.NewUnfollow } type Cluster struct { - // Provides explanations for shard allocations in the cluster. + // Explain the shard allocations. + // Get explanations for shard allocations in the cluster. + // For unassigned shards, it provides an explanation for why the shard is + // unassigned. 
+ // For assigned shards, it provides an explanation for why the shard is + // remaining on its current node and has not moved or rebalanced to another + // node. + // This API can be very useful when attempting to diagnose why a shard is + // unassigned or why a shard continues to remain on its current node when you + // might expect otherwise. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-allocation-explain.html AllocationExplain cluster_allocation_explain.NewAllocationExplain // Delete component templates. - // Deletes component templates. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html DeleteComponentTemplate cluster_delete_component_template.NewDeleteComponentTemplate - // Clears cluster voting config exclusions. + // Clear cluster voting config exclusions. + // Remove master-eligible nodes from the voting configuration exclusion list. // https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html DeleteVotingConfigExclusions cluster_delete_voting_config_exclusions.NewDeleteVotingConfigExclusions // Check component templates. @@ -886,33 +1098,40 @@ type Cluster struct { // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html ExistsComponentTemplate cluster_exists_component_template.NewExistsComponentTemplate // Get component templates. - // Retrieves information about component templates. + // Get information about component templates. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html GetComponentTemplate cluster_get_component_template.NewGetComponentTemplate - // Returns cluster-wide settings. + // Get cluster-wide settings. // By default, it returns only settings that have been explicitly defined. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-get-settings.html GetSettings cluster_get_settings.NewGetSettings - // The cluster health API returns a simple status on the health of the cluster. + // Get the cluster health status. // You can also use the API to get the health status of only specified data - // streams and indices. For data streams, the API retrieves the health status of - // the stream’s backing indices. - // The cluster health status is: green, yellow or red. On the shard level, a red - // status indicates that the specific shard is not allocated in the cluster, - // yellow means that the primary shard is allocated but replicas are not, and - // green means that all shards are allocated. The index level status is - // controlled by the worst shard status. The cluster status is controlled by the - // worst index status. + // streams and indices. + // For data streams, the API retrieves the health status of the stream’s backing + // indices. + // + // The cluster health status is: green, yellow or red. + // On the shard level, a red status indicates that the specific shard is not + // allocated in the cluster. Yellow means that the primary shard is allocated + // but replicas are not. Green means that all shards are allocated. + // The index level status is controlled by the worst shard status. + // + // One of the main benefits of the API is the ability to wait until the cluster + // reaches a certain high watermark health level. + // The cluster status is controlled by the worst index status. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html Health cluster_health.NewHealth // Get cluster info. // Returns basic information about the cluster. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-info.html Info cluster_info.NewInfo - // Returns cluster-level changes (such as create index, update mapping, allocate - // or fail shard) that have not yet been executed. 
+ // Get the pending cluster tasks. + // Get information about cluster-level changes (such as create index, update + // mapping, allocate or fail shard) that have not yet taken effect. + // // NOTE: This API returns a list of any pending updates to the cluster state. - // These are distinct from the tasks reported by the Task Management API which + // These are distinct from the tasks reported by the task management API which // include periodic tasks and tasks initiated by the user, such as node stats, // search queries, or create index requests. // However, if a user-initiated task such as a create index command causes a @@ -920,11 +1139,48 @@ type Cluster struct { // task api and pending cluster tasks API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-pending.html PendingTasks cluster_pending_tasks.NewPendingTasks - // Updates the cluster voting config exclusions by node ids or node names. + // Update voting configuration exclusions. + // Update the cluster voting config exclusions by node IDs or node names. + // By default, if there are more than three master-eligible nodes in the cluster + // and you remove fewer than half of the master-eligible nodes in the cluster at + // once, the voting configuration automatically shrinks. + // If you want to shrink the voting configuration to contain fewer than three + // nodes or to remove half or more of the master-eligible nodes in the cluster + // at once, use this API to remove departing nodes from the voting configuration + // manually. + // The API adds an entry for each specified node to the cluster’s voting + // configuration exclusions list. + // It then waits until the cluster has reconfigured its voting configuration to + // exclude the specified nodes. + // + // Clusters should have no voting configuration exclusions in normal operation. + // Once the excluded nodes have stopped, clear the voting configuration + // exclusions with `DELETE /_cluster/voting_config_exclusions`. 
+ // This API waits for the nodes to be fully removed from the cluster before it + // returns. + // If your cluster has voting configuration exclusions for nodes that you no + // longer intend to remove, use `DELETE + // /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the + // voting configuration exclusions without waiting for the nodes to leave the + // cluster. + // + // A response to `POST /_cluster/voting_config_exclusions` with an HTTP status + // code of 200 OK guarantees that the node has been removed from the voting + // configuration and will not be reinstated until the voting configuration + // exclusions are cleared by calling `DELETE + // /_cluster/voting_config_exclusions`. + // If the call to `POST /_cluster/voting_config_exclusions` fails or returns a + // response with an HTTP status code other than 200 OK then the node may not + // have been removed from the voting configuration. + // In that case, you may safely retry the call. + // + // NOTE: Voting exclusions are required only when you remove at least half of + // the master-eligible nodes from a cluster in a short time period. + // They are not required when removing master-ineligible nodes or when removing + // fewer than half of the master-eligible nodes. // https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html PostVotingConfigExclusions cluster_post_voting_config_exclusions.NewPostVotingConfigExclusions // Create or update a component template. - // Creates or updates a component template. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // @@ -947,24 +1203,134 @@ type Cluster struct { // You can use C-style `/* *\/` block comments in component templates. // You can include comments anywhere in the request body except before the // opening curly bracket. 
+ // + // **Applying component templates** + // + // You cannot directly apply a component template to a data stream or index. + // To be applied, a component template must be included in an index template's + // `composed_of` list. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html PutComponentTemplate cluster_put_component_template.NewPutComponentTemplate - // Updates the cluster settings. + // Update the cluster settings. + // Configure and update dynamic settings on a running cluster. + // You can also configure dynamic settings locally on an unstarted or shut down + // node in `elasticsearch.yml`. + // + // Updates made with this API can be persistent, which apply across cluster + // restarts, or transient, which reset after a cluster restart. + // You can also reset transient or persistent settings by assigning them a null + // value. + // + // If you configure the same setting using multiple methods, Elasticsearch + // applies the settings in following order of precedence: 1) Transient setting; + // 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting + // value. + // For example, you can apply a transient setting to override a persistent + // setting or `elasticsearch.yml` setting. + // However, a change to an `elasticsearch.yml` setting will not override a + // defined transient or persistent setting. + // + // TIP: In Elastic Cloud, use the user settings feature to configure all cluster + // settings. This method automatically rejects unsafe settings that could break + // your cluster. + // If you run Elasticsearch on your own hardware, use this API to configure + // dynamic cluster settings. + // Only use `elasticsearch.yml` for static cluster settings and node settings. + // The API doesn’t require a restart and ensures a setting’s value is the same + // on all nodes. + // + // WARNING: Transient cluster settings are no longer recommended. Use persistent + // cluster settings instead. 
+ // If a cluster becomes unstable, transient settings can clear unexpectedly, + // resulting in a potentially undesired cluster configuration. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html PutSettings cluster_put_settings.NewPutSettings - // The cluster remote info API allows you to retrieve all of the configured - // remote cluster information. It returns connection and endpoint information - // keyed by the configured remote cluster alias. + // Get remote cluster information. + // + // Get information about configured remote clusters. + // The API returns connection and endpoint information keyed by the configured + // remote cluster alias. + // + // > info + // > This API returns information that reflects current state on the local + // cluster. + // > The `connected` field does not necessarily reflect whether a remote cluster + // is down or unavailable, only whether there is currently an open connection to + // it. + // > Elasticsearch does not spontaneously try to reconnect to a disconnected + // remote cluster. + // > To trigger a reconnection, attempt a cross-cluster search, ES|QL + // cross-cluster search, or try the [resolve cluster + // endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-remote-info.html RemoteInfo cluster_remote_info.NewRemoteInfo - // Allows to manually change the allocation of individual shards in the cluster. + // Reroute the cluster. + // Manually change the allocation of individual shards in the cluster. + // For example, a shard can be moved from one node to another explicitly, an + // allocation can be canceled, and an unassigned shard can be explicitly + // allocated to a specific node. 
+ // + // It is important to note that after processing any reroute commands + // Elasticsearch will perform rebalancing as normal (respecting the values of + // settings such as `cluster.routing.rebalance.enable`) in order to remain in a + // balanced state. + // For example, if the requested allocation includes moving a shard from node1 + // to node2 then this may cause a shard to be moved from node2 back to node1 to + // even things out. + // + // The cluster can be set to disable allocations using the + // `cluster.routing.allocation.enable` setting. + // If allocations are disabled then the only allocations that will be performed + // are explicit ones given using the reroute command, and consequent allocations + // due to rebalancing. + // + // The cluster will attempt to allocate a shard a maximum of + // `index.allocation.max_retries` times in a row (defaults to `5`), before + // giving up and leaving the shard unallocated. + // This scenario can be caused by structural problems such as having an analyzer + // which refers to a stopwords file which doesn’t exist on all nodes. + // + // Once the problem has been corrected, allocation can be manually retried by + // calling the reroute API with the `?retry_failed` URI query parameter, which + // will attempt a single retry round for these shards. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html Reroute cluster_reroute.NewReroute - // Returns a comprehensive information about the state of the cluster. + // Get the cluster state. + // Get comprehensive information about the state of the cluster. + // + // The cluster state is an internal data structure which keeps track of a + // variety of information needed by every node, including the identity and + // attributes of the other nodes in the cluster; cluster-wide settings; index + // metadata, including the mapping and settings for each index; the location and + // status of every shard copy in the cluster. 
+ // + // The elected master node ensures that every node in the cluster has a copy of + // the same cluster state. + // This API lets you retrieve a representation of this internal state for + // debugging or diagnostic purposes. + // You may need to consult the Elasticsearch source code to determine the + // precise meaning of the response. + // + // By default the API will route requests to the elected master node since this + // node is the authoritative source of cluster states. + // You can also retrieve the cluster state held on the node handling the API + // request by adding the `?local=true` query parameter. + // + // Elasticsearch may need to expend significant effort to compute a response to + // this API in larger clusters, and the response may comprise a very large + // quantity of data. + // If you use this API repeatedly, your cluster may become unstable. + // + // WARNING: The response is a representation of an internal data structure. + // Its format is not subject to the same compatibility guarantees as other more + // stable APIs and may change from version to version. + // Do not query this API using external monitoring tools. + // Instead, obtain the information you require using other more stable cluster + // APIs. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html State cluster_state.NewState - // Returns cluster statistics. - // It returns basic index metrics (shard numbers, store size, memory usage) and + // Get cluster statistics. + // Get basic index metrics (shard numbers, store size, memory usage) and // information about the current nodes that form the cluster (number, roles, os, // jvm versions, memory usage, cpu and installed plugins). 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html @@ -972,195 +1338,1252 @@ type Cluster struct { } type Connector struct { - // Updates the last_seen field in the connector, and sets it to current - // timestamp + // Check in a connector. + // + // Update the `last_seen` field in the connector and set it to the current + // timestamp. // https://www.elastic.co/guide/en/elasticsearch/reference/current/check-in-connector-api.html CheckIn connector_check_in.NewCheckIn - // Deletes a connector. + // Delete a connector. + // + // Removes a connector and associated sync jobs. + // This is a destructive action that is not recoverable. + // NOTE: This action doesn’t delete any API keys, ingest pipelines, or data + // indices associated with the connector. + // These need to be removed manually. // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-connector-api.html Delete connector_delete.NewDelete - // Retrieves a connector. + // Get a connector. + // + // Get the details about a connector. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-connector-api.html Get connector_get.NewGet - // Updates last sync stats in the connector document + // Update the connector last sync stats. + // + // Update the fields related to the last sync of a connector. + // This action is used for analytics and monitoring. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-last-sync-api.html LastSync connector_last_sync.NewLastSync - // Returns existing connectors. + // Get all connectors. + // + // Get information about all connectors. // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-connector-api.html List connector_list.NewList - // Creates a connector. + // Create a connector. 
+ // + // Connectors are Elasticsearch integrations that bring content from third-party + // data sources, which can be deployed on Elastic Cloud or hosted on your own + // infrastructure. + // Elastic managed connectors (Native connectors) are a managed service on + // Elastic Cloud. + // Self-managed connectors (Connector clients) are self-managed on your + // infrastructure. // https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-api.html Post connector_post.NewPost - // Creates or updates a connector. + // Create or update a connector. // https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-api.html Put connector_put.NewPut // Creates a secret for a Connector. // SecretPost connector_secret_post.NewSecretPost - // Cancels a connector sync job. + // Cancel a connector sync job. + // + // Cancel a connector sync job, which sets the status to cancelling and updates + // `cancellation_requested_at` to the current time. + // The connector service is then responsible for setting the status of connector + // sync jobs to cancelled. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cancel-connector-sync-job-api.html SyncJobCancel connector_sync_job_cancel.NewSyncJobCancel - // Deletes a connector sync job. + // Check in a connector sync job. + // Check in a connector sync job and set the `last_seen` field to the current + // time before updating it in the internal index. + // + // To sync data using self-managed connectors, you need to deploy the Elastic + // connector service on your own infrastructure. + // This service runs automatically on Elastic Cloud for Elastic managed + // connectors. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/check-in-connector-sync-job-api.html + SyncJobCheckIn connector_sync_job_check_in.NewSyncJobCheckIn + // Claim a connector sync job. 
+ // This action updates the job status to `in_progress` and sets the `last_seen` + // and `started_at` timestamps to the current time. + // Additionally, it can set the `sync_cursor` property for the sync job. + // + // This API is not intended for direct connector management by users. + // It supports the implementation of services that utilize the connector + // protocol to communicate with Elasticsearch. + // + // To sync data using self-managed connectors, you need to deploy the Elastic + // connector service on your own infrastructure. + // This service runs automatically on Elastic Cloud for Elastic managed + // connectors. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/claim-connector-sync-job-api.html + SyncJobClaim connector_sync_job_claim.NewSyncJobClaim + // Delete a connector sync job. + // + // Remove a connector sync job and its associated data. + // This is a destructive action that is not recoverable. // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-connector-sync-job-api.html SyncJobDelete connector_sync_job_delete.NewSyncJobDelete - // Retrieves a connector sync job. + // Set a connector sync job error. + // Set the `error` field for a connector sync job and set its `status` to + // `error`. + // + // To sync data using self-managed connectors, you need to deploy the Elastic + // connector service on your own infrastructure. + // This service runs automatically on Elastic Cloud for Elastic managed + // connectors. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/set-connector-sync-job-error-api.html + SyncJobError connector_sync_job_error.NewSyncJobError + // Get a connector sync job. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-connector-sync-job-api.html SyncJobGet connector_sync_job_get.NewSyncJobGet - // Lists connector sync jobs. + // Get all connector sync jobs. 
+ // + // Get information about all stored connector sync jobs listed by their creation + // date in ascending order. // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-connector-sync-jobs-api.html SyncJobList connector_sync_job_list.NewSyncJobList - // Creates a connector sync job. + // Create a connector sync job. + // + // Create a connector sync job document in the internal index and initialize its + // counters and timestamps with default values. // https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-sync-job-api.html SyncJobPost connector_sync_job_post.NewSyncJobPost + // Set the connector sync job stats. + // Stats include: `deleted_document_count`, `indexed_document_count`, + // `indexed_document_volume`, and `total_document_count`. + // You can also update `last_seen`. + // This API is mainly used by the connector service for updating sync job + // information. + // + // To sync data using self-managed connectors, you need to deploy the Elastic + // connector service on your own infrastructure. + // This service runs automatically on Elastic Cloud for Elastic managed + // connectors. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/set-connector-sync-job-stats-api.html + SyncJobUpdateStats connector_sync_job_update_stats.NewSyncJobUpdateStats + // Activate the connector draft filter. + // // Activates the valid draft filtering for a connector. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-api.html UpdateActiveFiltering connector_update_active_filtering.NewUpdateActiveFiltering - // Updates the API key id in the connector document + // Update the connector API key ID. + // + // Update the `api_key_id` and `api_key_secret_id` fields of a connector. + // You can specify the ID of the API key used for authorization and the ID of + // the connector secret where the API key is stored. 
+ // The connector secret ID is required only for Elastic managed (native) + // connectors. + // Self-managed connectors (connector clients) do not use this field. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-api-key-id-api.html UpdateApiKeyId connector_update_api_key_id.NewUpdateApiKeyId - // Updates the configuration field in the connector document + // Update the connector configuration. + // + // Update the configuration field in the connector document. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-configuration-api.html UpdateConfiguration connector_update_configuration.NewUpdateConfiguration - // Updates the filtering field in the connector document + // Update the connector error field. + // + // Set the error field for the connector. + // If the error provided in the request body is non-null, the connector’s status + // is updated to error. + // Otherwise, if the error is reset to null, the connector status is updated to + // connected. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-error-api.html UpdateError connector_update_error.NewUpdateError - // Updates the filtering field in the connector document + // Update the connector features. + // Update the connector features in the connector document. + // This API can be used to control the following aspects of a connector: + // + // * document-level security + // * incremental syncs + // * advanced sync rules + // * basic sync rules + // + // Normally, the running connector service automatically manages these features. + // However, you can use this API to override the default behavior. + // + // To sync data using self-managed connectors, you need to deploy the Elastic + // connector service on your own infrastructure. + // This service runs automatically on Elastic Cloud for Elastic managed + // connectors. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-features-api.html + UpdateFeatures connector_update_features.NewUpdateFeatures + // Update the connector filtering. + // + // Update the draft filtering configuration of a connector and marks the draft + // validation state as edited. + // The filtering draft is activated once validated by the running Elastic + // connector service. + // The filtering property is used to configure sync rules (both basic and + // advanced) for a connector. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-api.html UpdateFiltering connector_update_filtering.NewUpdateFiltering - // Updates the draft filtering validation info for a connector. + // Update the connector draft filtering validation. + // + // Update the draft filtering validation info for a connector. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-validation-api.html UpdateFilteringValidation connector_update_filtering_validation.NewUpdateFilteringValidation - // Updates the index_name in the connector document + // Update the connector index name. + // + // Update the `index_name` field of a connector, specifying the index where the + // data ingested by the connector is stored. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-index-name-api.html UpdateIndexName connector_update_index_name.NewUpdateIndexName - // Updates the name and description fields in the connector document + // Update the connector name and description. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-name-description-api.html UpdateName connector_update_name.NewUpdateName - // Updates the is_native flag in the connector document + // Update the connector is_native flag. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-native-api.html UpdateNative connector_update_native.NewUpdateNative - // Updates the pipeline field in the connector document + // Update the connector pipeline. + // + // When you create a new connector, the configuration of an ingest pipeline is + // populated with default settings. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-pipeline-api.html UpdatePipeline connector_update_pipeline.NewUpdatePipeline - // Updates the scheduling field in the connector document + // Update the connector scheduling. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-scheduling-api.html UpdateScheduling connector_update_scheduling.NewUpdateScheduling - // Updates the service type of the connector + // Update the connector service type. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-service-type-api.html UpdateServiceType connector_update_service_type.NewUpdateServiceType - // Updates the status of the connector + // Update the connector status. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-status-api.html UpdateStatus connector_update_status.NewUpdateStatus } type Core struct { // Bulk index or delete documents. - // Performs multiple indexing or delete operations in a single API call. + // Perform multiple `index`, `create`, `delete`, and `update` actions in a + // single request. // This reduces overhead and can greatly increase indexing speed. + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or index alias: + // + // * To use the `create` action, you must have the `create_doc`, `create`, + // `index`, or `write` index privilege. Data streams support only the `create` + // action. 
+ // * To use the `index` action, you must have the `create`, `index`, or `write`
+ // index privilege.
+ // * To use the `delete` action, you must have the `delete` or `write` index
+ // privilege.
+ // * To use the `update` action, you must have the `index` or `write` index
+ // privilege.
+ // * To automatically create a data stream or index with a bulk API request, you
+ // must have the `auto_configure`, `create_index`, or `manage` index privilege.
+ // * To make the result of a bulk operation visible to search using the
+ // `refresh` parameter, you must have the `maintenance` or `manage` index
+ // privilege.
+ //
+ // Automatic data stream creation requires a matching index template with data
+ // stream enabled.
+ //
+ // The actions are specified in the request body using a newline delimited JSON
+ // (NDJSON) structure:
+ //
+ // ```
+ // action_and_meta_data\n
+ // optional_source\n
+ // action_and_meta_data\n
+ // optional_source\n
+ // ....
+ // action_and_meta_data\n
+ // optional_source\n
+ // ```
+ //
+ // The `index` and `create` actions expect a source on the next line and have
+ // the same semantics as the `op_type` parameter in the standard index API.
+ // A `create` action fails if a document with the same ID already exists in the
+ // target.
+ // An `index` action adds or replaces a document as necessary.
+ //
+ // NOTE: Data streams support only the `create` action.
+ // To update or delete a document in a data stream, you must target the backing
+ // index containing the document.
+ //
+ // An `update` action expects that the partial doc, upsert, and script and its
+ // options are specified on the next line.
+ //
+ // A `delete` action does not expect a source on the next line and has the same
+ // semantics as the standard delete API.
+ //
+ // NOTE: The final line of data must end with a newline character (`\n`).
+ // Each newline character may be preceded by a carriage return (`\r`).
+ // When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header + // of `application/json` or `application/x-ndjson`. + // Because this format uses literal newline characters (`\n`) as delimiters, + // make sure that the JSON actions and sources are not pretty printed. + // + // If you provide a target in the request path, it is used for any actions that + // don't explicitly specify an `_index` argument. + // + // A note on the format: the idea here is to make processing as fast as + // possible. + // As some of the actions are redirected to other shards on other nodes, only + // `action_meta_data` is parsed on the receiving node side. + // + // Client libraries using this protocol should try and strive to do something + // similar on the client side, and reduce buffering as much as possible. + // + // There is no "correct" number of actions to perform in a single bulk request. + // Experiment with different settings to find the optimal size for your + // particular workload. + // Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by + // default so clients must ensure that no request exceeds this size. + // It is not possible to index a single document that exceeds the size limit, so + // you must pre-process any such documents into smaller pieces before sending + // them to Elasticsearch. + // For instance, split documents into pages or chapters before indexing them, or + // store raw binary data in a system outside Elasticsearch and replace the raw + // data with a link to the external system in the documents that you send to + // Elasticsearch. 
+ //
+ // **Client support for bulk requests**
+ //
+ // Some of the officially supported clients provide helpers to assist with bulk
+ // requests and reindexing:
+ //
+ // * Go: Check out `esutil.BulkIndexer`
+ // * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and
+ // `Search::Elasticsearch::Client::5_0::Scroll`
+ // * Python: Check out `elasticsearch.helpers.*`
+ // * JavaScript: Check out `client.helpers.*`
+ // * .NET: Check out `BulkAllObservable`
+ // * PHP: Check out bulk indexing.
+ //
+ // **Submitting bulk requests with cURL**
+ //
+ // If you're providing text file input to `curl`, you must use the
+ // `--data-binary` flag instead of plain `-d`.
+ // The latter doesn't preserve newlines. For example:
+ //
+ // ```
+ // $ cat requests
+ // { "index" : { "_index" : "test", "_id" : "1" } }
+ // { "field1" : "value1" }
+ // $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk
+ // --data-binary "@requests"; echo
+ // {"took":7, "errors": false,
+ // "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+ // ```
+ //
+ // **Optimistic concurrency control**
+ //
+ // Each `index` and `delete` action within a bulk API call may include the
+ // `if_seq_no` and `if_primary_term` parameters in their respective action and
+ // meta data lines.
+ // The `if_seq_no` and `if_primary_term` parameters control how operations are
+ // run, based on the last modification to existing documents. See Optimistic
+ // concurrency control for more details.
+ //
+ // **Versioning**
+ //
+ // Each bulk item can include the version value using the `version` field.
+ // It automatically follows the behavior of the index or delete operation based
+ // on the `_version` mapping.
+ // It also supports the `version_type`.
+ //
+ // **Routing**
+ //
+ // Each bulk item can include the routing value using the `routing` field.
+ // It automatically follows the behavior of the index or delete operation based + // on the `_routing` mapping. + // + // NOTE: Data streams do not support custom routing unless they were created + // with the `allow_custom_routing` setting enabled in the template. + // + // **Wait for active shards** + // + // When making bulk calls, you can set the `wait_for_active_shards` parameter to + // require a minimum number of shard copies to be active before starting to + // process the bulk request. + // + // **Refresh** + // + // Control when the changes made by this request are visible to search. + // + // NOTE: Only the shards that receive the bulk request will be affected by + // refresh. + // Imagine a `_bulk?refresh=wait_for` request with three documents in it that + // happen to be routed to different shards in an index with five shards. + // The request will only wait for those three shards to refresh. + // The other two shards that make up the index do not participate in the `_bulk` + // request at all. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html Bulk core_bulk.NewBulk - // Clears the search context and results for a scrolling search. + // Clear a scrolling search. + // Clear the search context and results for a scrolling search. // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html ClearScroll core_clear_scroll.NewClearScroll - // Closes a point-in-time. + // Close a point in time. + // A point in time must be opened explicitly before being used in search + // requests. + // The `keep_alive` parameter tells Elasticsearch how long it should persist. + // A point in time is automatically closed when the `keep_alive` period has + // elapsed. + // However, keeping points in time has a cost; close them as soon as they are no + // longer required for search requests. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html
ClosePointInTime core_close_point_in_time.NewClosePointInTime
- // Returns number of documents matching a query.
+ // Count search results.
+ // Get the number of documents matching a query.
+ //
+ // The query can be provided either by using a simple query string as a
+ // parameter, or by defining Query DSL within the request body.
+ // The query is optional. When no query is provided, the API uses `match_all` to
+ // count all the documents.
+ //
+ // The count API supports multi-target syntax. You can run a single count API
+ // search across multiple data streams and indices.
+ //
+ // The operation is broadcast across all shards.
+ // For each shard ID group, a replica is chosen and the search is run against
+ // it.
+ // This means that replicas increase the scalability of the count.
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html
Count core_count.NewCount
- // Index a document.
- // Adds a JSON document to the specified data stream or index and makes it
- // searchable.
- // If the target is an index and the document already exists, the request
- // updates the document and increments its version.
+ // Create a new document in the index.
+ //
+ // You can index a new JSON document with the `/<target>/_doc/` or
+ // `/<target>/_create/<_id>` APIs
+ // Using `_create` guarantees that the document is indexed only if it does not
+ // already exist.
+ // It returns a 409 response when a document with the same ID already exists in
+ // the index.
+ // To update an existing document, you must use the `/<target>/_doc/` API.
+ //
+ // If the Elasticsearch security features are enabled, you must have the
+ // following index privileges for the target data stream, index, or index alias:
+ //
+ // * To add a document using the `PUT /<target>/_create/<_id>` or `POST
+ // /<target>/_create/<_id>` request formats, you must have the `create_doc`,
+ // `create`, `index`, or `write` index privilege.
+ // * To automatically create a data stream or index with this API request, you
+ // must have the `auto_configure`, `create_index`, or `manage` index privilege.
+ //
+ // Automatic data stream creation requires a matching index template with data
+ // stream enabled.
+ //
+ // **Automatically create data streams and indices**
+ //
+ // If the request's target doesn't exist and matches an index template with a
+ // `data_stream` definition, the index operation automatically creates the data
+ // stream.
+ //
+ // If the target doesn't exist and doesn't match a data stream template, the
+ // operation automatically creates the index and applies any matching index
+ // templates.
+ //
+ // NOTE: Elasticsearch includes several built-in index templates. To avoid
+ // naming collisions with these templates, refer to index pattern documentation.
+ //
+ // If no mapping exists, the index operation creates a dynamic mapping.
+ // By default, new fields and objects are automatically added to the mapping if
+ // needed.
+ //
+ // Automatic index creation is controlled by the `action.auto_create_index`
+ // setting.
+ // If it is `true`, any index can be created automatically.
+ // You can modify this setting to explicitly allow or block automatic creation
+ // of indices that match specified patterns or set it to `false` to turn off
+ // automatic index creation entirely.
+ // Specify a comma-separated list of patterns you want to allow or prefix each
+ // pattern with `+` or `-` to indicate whether it should be allowed or blocked.
+ // When a list is specified, the default behaviour is to disallow.
+ // + // NOTE: The `action.auto_create_index` setting affects the automatic creation + // of indices only. + // It does not affect the creation of data streams. + // + // **Routing** + // + // By default, shard placement — or routing — is controlled by using a hash of + // the document's ID value. + // For more explicit control, the value fed into the hash function used by the + // router can be directly specified on a per-operation basis using the `routing` + // parameter. + // + // When setting up explicit mapping, you can also use the `_routing` field to + // direct the index operation to extract the routing value from the document + // itself. + // This does come at the (very minimal) cost of an additional document parsing + // pass. + // If the `_routing` mapping is defined and set to be required, the index + // operation will fail if no routing value is provided or extracted. + // + // NOTE: Data streams do not support custom routing unless they were created + // with the `allow_custom_routing` setting enabled in the template. + // + // **Distributed** + // + // The index operation is directed to the primary shard based on its route and + // performed on the actual node containing this shard. + // After the primary shard completes the operation, if needed, the update is + // distributed to applicable replicas. + // + // **Active shards** + // + // To improve the resiliency of writes to the system, indexing operations can be + // configured to wait for a certain number of active shard copies before + // proceeding with the operation. + // If the requisite number of active shard copies are not available, then the + // write operation must wait and retry, until either the requisite shard copies + // have started or a timeout occurs. + // By default, write operations only wait for the primary shards to be active + // before proceeding (that is to say `wait_for_active_shards` is `1`). 
+ // This default can be overridden in the index settings dynamically by setting + // `index.write.wait_for_active_shards`. + // To alter this behavior per operation, use the `wait_for_active_shards + // request` parameter. + // + // Valid values are all or any positive integer up to the total number of + // configured copies per shard in the index (which is `number_of_replicas`+1). + // Specifying a negative value or a number greater than the number of shard + // copies will throw an error. + // + // For example, suppose you have a cluster of three nodes, A, B, and C and you + // create an index index with the number of replicas set to 3 (resulting in 4 + // shard copies, one more copy than there are nodes). + // If you attempt an indexing operation, by default the operation will only + // ensure the primary copy of each shard is available before proceeding. + // This means that even if B and C went down and A hosted the primary shard + // copies, the indexing operation would still proceed with only one copy of the + // data. + // If `wait_for_active_shards` is set on the request to `3` (and all three nodes + // are up), the indexing operation will require 3 active shard copies before + // proceeding. + // This requirement should be met because there are 3 active nodes in the + // cluster, each one holding a copy of the shard. + // However, if you set `wait_for_active_shards` to `all` (or to `4`, which is + // the same in this situation), the indexing operation will not proceed as you + // do not have all 4 copies of each shard active in the index. + // The operation will timeout unless a new node is brought up in the cluster to + // host the fourth copy of the shard. + // + // It is important to note that this setting greatly reduces the chances of the + // write operation not writing to the requisite number of shard copies, but it + // does not completely eliminate the possibility, because this check occurs + // before the write operation starts. 
+ // After the write operation is underway, it is still possible for replication + // to fail on any number of shard copies but still succeed on the primary. + // The `_shards` section of the API response reveals the number of shard copies + // on which replication succeeded and failed. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html Create core_create.NewCreate // Delete a document. - // Removes a JSON document from the specified index. + // + // Remove a JSON document from the specified index. + // + // NOTE: You cannot send deletion requests directly to a data stream. + // To delete a document in a data stream, you must target the backing index + // containing the document. + // + // **Optimistic concurrency control** + // + // Delete operations can be made conditional and only be performed if the last + // modification to the document was assigned the sequence number and primary + // term specified by the `if_seq_no` and `if_primary_term` parameters. + // If a mismatch is detected, the operation will result in a + // `VersionConflictException` and a status code of `409`. + // + // **Versioning** + // + // Each document indexed is versioned. + // When deleting a document, the version can be specified to make sure the + // relevant document you are trying to delete is actually being deleted and it + // has not changed in the meantime. + // Every write operation run on a document, deletes included, causes its version + // to be incremented. + // The version number of a deleted document remains available for a short time + // after deletion to allow for control of concurrent operations. + // The length of time for which a deleted document's version remains available + // is determined by the `index.gc_deletes` index setting. + // + // **Routing** + // + // If routing is used during indexing, the routing value also needs to be + // specified to delete a document. 
+ // + // If the `_routing` mapping is set to `required` and no routing value is + // specified, the delete API throws a `RoutingMissingException` and rejects the + // request. + // + // For example: + // + // ``` + // DELETE /my-index-000001/_doc/1?routing=shard-1 + // ``` + // + // This request deletes the document with ID 1, but it is routed based on the + // user. + // The document is not deleted if the correct routing is not specified. + // + // **Distributed** + // + // The delete operation gets hashed into a specific shard ID. + // It then gets redirected into the primary shard within that ID group and + // replicated (if needed) to shard replicas within that ID group. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html Delete core_delete.NewDelete // Delete documents. + // // Deletes documents that match the specified query. + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or alias: + // + // * `read` + // * `delete` or `write` + // + // You can specify the query criteria in the request URI or the request body + // using the same syntax as the search API. + // When you submit a delete by query request, Elasticsearch gets a snapshot of + // the data stream or index when it begins processing the request and deletes + // matching documents using internal versioning. + // If a document changes between the time that the snapshot is taken and the + // delete operation is processed, it results in a version conflict and the + // delete operation fails. + // + // NOTE: Documents with a version equal to 0 cannot be deleted using delete by + // query because internal versioning does not support 0 as a valid version + // number. + // + // While processing a delete by query request, Elasticsearch performs multiple + // search requests sequentially to find all of the matching documents to delete. 
+ // A bulk delete request is performed for each batch of matching documents.
+ // If a search or bulk request is rejected, the requests are retried up to 10
+ // times, with exponential back off.
+ // If the maximum retry limit is reached, processing halts and all failed
+ // requests are returned in the response.
+ // Any delete requests that completed successfully still stick, they are not
+ // rolled back.
+ //
+ // You can opt to count version conflicts instead of halting and returning by
+ // setting `conflicts` to `proceed`.
+ // Note that if you opt to count version conflicts the operation could attempt
+ // to delete more documents from the source than `max_docs` until it has
+ // successfully deleted `max_docs` documents, or it has gone through every
+ // document in the source query.
+ //
+ // **Throttling delete requests**
+ //
+ // To control the rate at which delete by query issues batches of delete
+ // operations, you can set `requests_per_second` to any positive decimal number.
+ // This pads each batch with a wait time to throttle the rate.
+ // Set `requests_per_second` to `-1` to disable throttling.
+ //
+ // Throttling uses a wait time between batches so that the internal scroll
+ // requests can be given a timeout that takes the request padding into account.
+ // The padding time is the difference between the batch size divided by the
+ // `requests_per_second` and the time spent writing.
+ // By default the batch size is `1000`, so if `requests_per_second` is set to
+ // `500`:
+ //
+ // ```
+ // target_time = 1000 / 500 per second = 2 seconds
+ // wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+ // ```
+ //
+ // Since the batch is issued as a single `_bulk` request, large batch sizes
+ // cause Elasticsearch to create many requests and wait before starting the next
+ // set.
+ // This is "bursty" instead of "smooth".
+ // + // **Slicing** + // + // Delete by query supports sliced scroll to parallelize the delete process. + // This can improve efficiency and provide a convenient way to break the request + // down into smaller parts. + // + // Setting `slices` to `auto` lets Elasticsearch choose the number of slices to + // use. + // This setting will use one slice per shard, up to a certain limit. + // If there are multiple source data streams or indices, it will choose the + // number of slices based on the index or backing index with the smallest number + // of shards. + // Adding slices to the delete by query operation creates sub-requests which + // means it has some quirks: + // + // * You can see these requests in the tasks APIs. These sub-requests are + // "child" tasks of the task for the request with slices. + // * Fetching the status of the task for the request with slices only contains + // the status of completed slices. + // * These sub-requests are individually addressable for things like + // cancellation and rethrottling. + // * Rethrottling the request with `slices` will rethrottle the unfinished + // sub-request proportionally. + // * Canceling the request with `slices` will cancel each sub-request. + // * Due to the nature of `slices` each sub-request won't get a perfectly even + // portion of the documents. All documents will be addressed, but some slices + // may be larger than others. Expect larger slices to have a more even + // distribution. + // * Parameters like `requests_per_second` and `max_docs` on a request with + // `slices` are distributed proportionally to each sub-request. Combine that + // with the earlier point about distribution being uneven and you should + // conclude that using `max_docs` with `slices` might not result in exactly + // `max_docs` documents being deleted. + // * Each sub-request gets a slightly different snapshot of the source data + // stream or index though these are all taken at approximately the same time. 
+ //
+ // If you're slicing manually or otherwise tuning automatic slicing, keep in
+ // mind that:
+ //
+ // * Query performance is most efficient when the number of slices is equal to
+ // the number of shards in the index or backing index. If that number is large
+ // (for example, 500), choose a lower number as too many `slices` hurts
+ // performance. Setting `slices` higher than the number of shards generally does
+ // not improve efficiency and adds overhead.
+ // * Delete performance scales linearly across available resources with the
+ // number of slices.
+ //
+ // Whether query or delete performance dominates the runtime depends on the
+ // documents being reindexed and cluster resources.
+ //
+ // **Cancel a delete by query operation**
+ //
+ // Any delete by query can be canceled using the task cancel API. For example:
+ //
+ // ```
+ // POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
+ // ```
+ //
+ // The task ID can be found by using the get tasks API.
+ //
+ // Cancellation should happen quickly but might take a few seconds.
+ // The get task status API will continue to list the delete by query task until
+ // this task checks that it has been cancelled and terminates itself.
// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html
DeleteByQuery core_delete_by_query.NewDeleteByQuery
- // Changes the number of requests per second for a particular Delete By Query
+ // Throttle a delete by query operation.
+ //
+ // Change the number of requests per second for a particular delete by query
// operation.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html
+ // Rethrottling that speeds up the query takes effect immediately but
+ // rethrottling that slows down the query takes effect after completing the
+ // current batch to prevent scroll timeouts.
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html#docs-delete-by-query-rethrottle DeleteByQueryRethrottle core_delete_by_query_rethrottle.NewDeleteByQueryRethrottle // Delete a script or search template. // Deletes a stored script or search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-stored-script-api.html DeleteScript core_delete_script.NewDeleteScript // Check a document. - // Checks if a specified document exists. + // + // Verify that a document exists. + // For example, check to see if a document with the `_id` 0 exists: + // + // ``` + // HEAD my-index-000001/_doc/0 + // ``` + // + // If the document exists, the API returns a status code of `200 - OK`. + // If the document doesn’t exist, the API returns `404 - Not Found`. + // + // **Versioning support** + // + // You can use the `version` parameter to check the document only if its current + // version is equal to the specified one. + // + // Internally, Elasticsearch has marked the old document as deleted and added an + // entirely new document. + // The old version of the document doesn't disappear immediately, although you + // won't be able to access it. + // Elasticsearch cleans up deleted documents in the background as you continue + // to index more data. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html Exists core_exists.NewExists // Check for a document source. - // Checks if a document's `_source` is stored. + // + // Check whether a document source exists in an index. + // For example: + // + // ``` + // HEAD my-index-000001/_source/1 + // ``` + // + // A document's source is not available if it is disabled in the mapping. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html ExistsSource core_exists_source.NewExistsSource // Explain a document match result. 
- // Returns information about why a specific document matches, or doesn’t match, - // a query. + // Get information about why a specific document matches, or doesn't match, a + // query. + // It computes a score explanation for a query and a specific document. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-explain.html Explain core_explain.NewExplain - // The field capabilities API returns the information about the capabilities of - // fields among multiple indices. - // The field capabilities API returns runtime fields like any other field. For - // example, a runtime field with a type - // of keyword is returned as any other field that belongs to the `keyword` - // family. + // Get the field capabilities. + // + // Get information about the capabilities of fields among multiple indices. + // + // For data streams, the API returns field capabilities among the stream’s + // backing indices. + // It returns runtime fields like any other field. + // For example, a runtime field with a type of keyword is returned the same as + // any other field that belongs to the `keyword` family. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html FieldCaps core_field_caps.NewFieldCaps // Get a document by its ID. - // Retrieves the document with the specified ID from an index. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html - Get core_get.NewGet + // + // Get a document and its source or stored fields from an index. + // + // By default, this API is realtime and is not affected by the refresh rate of + // the index (when data will become visible for search). + // In the case where stored fields are requested with the `stored_fields` + // parameter and the document has been updated but is not yet refreshed, the API + // will have to parse and analyze the source to extract the stored fields. + // To turn off realtime behavior, set the `realtime` parameter to false. 
+ // + // **Source filtering** + // + // By default, the API returns the contents of the `_source` field unless you + // have used the `stored_fields` parameter or the `_source` field is turned off. + // You can turn off `_source` retrieval by using the `_source` parameter: + // + // ``` + // GET my-index-000001/_doc/0?_source=false + // ``` + // + // If you only need one or two fields from the `_source`, use the + // `_source_includes` or `_source_excludes` parameters to include or filter out + // particular fields. + // This can be helpful with large documents where partial retrieval can save on + // network overhead + // Both parameters take a comma separated list of fields or wildcard + // expressions. + // For example: + // + // ``` + // GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities + // ``` + // + // If you only want to specify includes, you can use a shorter notation: + // + // ``` + // GET my-index-000001/_doc/0?_source=*.id + // ``` + // + // **Routing** + // + // If routing is used during indexing, the routing value also needs to be + // specified to retrieve a document. + // For example: + // + // ``` + // GET my-index-000001/_doc/2?routing=user1 + // ``` + // + // This request gets the document with ID 2, but it is routed based on the user. + // The document is not fetched if the correct routing is not specified. + // + // **Distributed** + // + // The GET operation is hashed into a specific shard ID. + // It is then redirected to one of the replicas within that shard ID and returns + // the result. + // The replicas are the primary shard and its replicas within that shard ID + // group. + // This means that the more replicas you have, the better your GET scaling will + // be. + // + // **Versioning support** + // + // You can use the `version` parameter to retrieve the document only if its + // current version is equal to the specified one. 
+ // + // Internally, Elasticsearch has marked the old document as deleted and added an + // entirely new document. + // The old version of the document doesn't disappear immediately, although you + // won't be able to access it. + // Elasticsearch cleans up deleted documents in the background as you continue + // to index more data. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html + Get core_get.NewGet // Get a script or search template. // Retrieves a stored script or search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-stored-script-api.html GetScript core_get_script.NewGetScript - // Returns all script contexts. - // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-contexts.html + // Get script contexts. + // + // Get a list of supported script contexts and their methods. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-script-contexts-api.html GetScriptContext core_get_script_context.NewGetScriptContext - // Returns available script types, languages and contexts - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // Get script languages. + // + // Get a list of available script types, languages, and contexts. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-script-languages-api.html GetScriptLanguages core_get_script_languages.NewGetScriptLanguages // Get a document's source. - // Returns the source of a document. + // + // Get the source of a document. 
+ // For example: + // + // ``` + // GET my-index-000001/_source/1 + // ``` + // + // You can use the source filtering parameters to control which parts of the + // `_source` are returned: + // + // ``` + // GET + // my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities + // ``` // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html GetSource core_get_source.NewGetSource - // Returns the health of the cluster. + // Get the cluster health. + // Get a report with the health status of an Elasticsearch cluster. + // The report contains a list of indicators that compose Elasticsearch + // functionality. + // + // Each indicator has a health status of: green, unknown, yellow or red. + // The indicator will provide an explanation and metadata describing the reason + // for its current health status. + // + // The cluster’s status is controlled by the worst indicator status. + // + // In the event that an indicator’s status is non-green, a list of impacts may + // be present in the indicator result which detail the functionalities that are + // negatively affected by the health issue. + // Each impact carries with it a severity level, an area of the system that is + // affected, and a simple description of the impact on the system. + // + // Some health indicators can determine the root cause of a health problem and + // prescribe a set of steps that can be performed in order to improve the health + // of the system. + // The root cause and remediation steps are encapsulated in a diagnosis. + // A diagnosis contains a cause detailing a root cause analysis, an action + // containing a brief description of the steps to take to fix the problem, the + // list of affected resources (if applicable), and a detailed step-by-step + // troubleshooting guide to fix the diagnosed problem. + // + // NOTE: The health indicators perform root cause analysis of non-green health + // statuses. 
This can be computationally expensive when called frequently.
+ // When setting up automated polling of the API for health status, set verbose
+ // to false to disable the more expensive analysis logic.
// https://www.elastic.co/guide/en/elasticsearch/reference/current/health-api.html
HealthReport core_health_report.NewHealthReport
- // Index a document.
- // Adds a JSON document to the specified data stream or index and makes it
+ // Create or update a document in an index.
+ //
+ // Add a JSON document to the specified data stream or index and make it
// searchable.
// If the target is an index and the document already exists, the request
// updates the document and increments its version.
+ //
+ // NOTE: You cannot use this API to send update requests for existing documents
+ // in a data stream.
+ //
+ // If the Elasticsearch security features are enabled, you must have the
+ // following index privileges for the target data stream, index, or index alias:
+ //
+ // * To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request
+ // format, you must have the `create`, `index`, or `write` index privilege.
+ // * To add a document using the `POST /<target>/_doc/` request format, you must
+ // have the `create_doc`, `create`, `index`, or `write` index privilege.
+ // * To automatically create a data stream or index with this API request, you
+ // must have the `auto_configure`, `create_index`, or `manage` index privilege.
+ //
+ // Automatic data stream creation requires a matching index template with data
+ // stream enabled.
+ //
+ // NOTE: Replica shards might not all be started when an indexing operation
+ // returns successfully.
+ // By default, only the primary is required. Set `wait_for_active_shards` to
+ // change this default behavior.
+ // + // **Automatically create data streams and indices** + // + // If the request's target doesn't exist and matches an index template with a + // `data_stream` definition, the index operation automatically creates the data + // stream. + // + // If the target doesn't exist and doesn't match a data stream template, the + // operation automatically creates the index and applies any matching index + // templates. + // + // NOTE: Elasticsearch includes several built-in index templates. To avoid + // naming collisions with these templates, refer to index pattern documentation. + // + // If no mapping exists, the index operation creates a dynamic mapping. + // By default, new fields and objects are automatically added to the mapping if + // needed. + // + // Automatic index creation is controlled by the `action.auto_create_index` + // setting. + // If it is `true`, any index can be created automatically. + // You can modify this setting to explicitly allow or block automatic creation + // of indices that match specified patterns or set it to `false` to turn off + // automatic index creation entirely. + // Specify a comma-separated list of patterns you want to allow or prefix each + // pattern with `+` or `-` to indicate whether it should be allowed or blocked. + // When a list is specified, the default behaviour is to disallow. + // + // NOTE: The `action.auto_create_index` setting affects the automatic creation + // of indices only. + // It does not affect the creation of data streams. + // + // **Optimistic concurrency control** + // + // Index operations can be made conditional and only be performed if the last + // modification to the document was assigned the sequence number and primary + // term specified by the `if_seq_no` and `if_primary_term` parameters. + // If a mismatch is detected, the operation will result in a + // `VersionConflictException` and a status code of `409`. 
+ // + // **Routing** + // + // By default, shard placement — or routing — is controlled by using a hash of + // the document's ID value. + // For more explicit control, the value fed into the hash function used by the + // router can be directly specified on a per-operation basis using the `routing` + // parameter. + // + // When setting up explicit mapping, you can also use the `_routing` field to + // direct the index operation to extract the routing value from the document + // itself. + // This does come at the (very minimal) cost of an additional document parsing + // pass. + // If the `_routing` mapping is defined and set to be required, the index + // operation will fail if no routing value is provided or extracted. + // + // NOTE: Data streams do not support custom routing unless they were created + // with the `allow_custom_routing` setting enabled in the template. + // + // **Distributed** + // + // The index operation is directed to the primary shard based on its route and + // performed on the actual node containing this shard. + // After the primary shard completes the operation, if needed, the update is + // distributed to applicable replicas. + // + // **Active shards** + // + // To improve the resiliency of writes to the system, indexing operations can be + // configured to wait for a certain number of active shard copies before + // proceeding with the operation. + // If the requisite number of active shard copies are not available, then the + // write operation must wait and retry, until either the requisite shard copies + // have started or a timeout occurs. + // By default, write operations only wait for the primary shards to be active + // before proceeding (that is to say `wait_for_active_shards` is `1`). + // This default can be overridden in the index settings dynamically by setting + // `index.write.wait_for_active_shards`. + // To alter this behavior per operation, use the `wait_for_active_shards + // request` parameter. 
+ // + // Valid values are all or any positive integer up to the total number of + // configured copies per shard in the index (which is `number_of_replicas`+1). + // Specifying a negative value or a number greater than the number of shard + // copies will throw an error. + // + // For example, suppose you have a cluster of three nodes, A, B, and C and you + // create an index index with the number of replicas set to 3 (resulting in 4 + // shard copies, one more copy than there are nodes). + // If you attempt an indexing operation, by default the operation will only + // ensure the primary copy of each shard is available before proceeding. + // This means that even if B and C went down and A hosted the primary shard + // copies, the indexing operation would still proceed with only one copy of the + // data. + // If `wait_for_active_shards` is set on the request to `3` (and all three nodes + // are up), the indexing operation will require 3 active shard copies before + // proceeding. + // This requirement should be met because there are 3 active nodes in the + // cluster, each one holding a copy of the shard. + // However, if you set `wait_for_active_shards` to `all` (or to `4`, which is + // the same in this situation), the indexing operation will not proceed as you + // do not have all 4 copies of each shard active in the index. + // The operation will timeout unless a new node is brought up in the cluster to + // host the fourth copy of the shard. + // + // It is important to note that this setting greatly reduces the chances of the + // write operation not writing to the requisite number of shard copies, but it + // does not completely eliminate the possibility, because this check occurs + // before the write operation starts. + // After the write operation is underway, it is still possible for replication + // to fail on any number of shard copies but still succeed on the primary. 
+ // The `_shards` section of the API response reveals the number of shard copies + // on which replication succeeded and failed. + // + // **No operation (noop) updates** + // + // When updating a document by using this API, a new version of the document is + // always created even if the document hasn't changed. + // If this isn't acceptable use the `_update` API with `detect_noop` set to + // `true`. + // The `detect_noop` option isn't available on this API because it doesn’t fetch + // the old source and isn't able to compare it against the new source. + // + // There isn't a definitive rule for when noop updates aren't acceptable. + // It's a combination of lots of factors like how frequently your data source + // sends updates that are actually noops and how many queries per second + // Elasticsearch runs on the shard receiving the updates. + // + // **Versioning** + // + // Each indexed document is given a version number. + // By default, internal versioning is used that starts at 1 and increments with + // each update, deletes included. + // Optionally, the version number can be set to an external value (for example, + // if maintained in a database). + // To enable this functionality, `version_type` should be set to `external`. + // The value provided must be a numeric, long value greater than or equal to 0, + // and less than around `9.2e+18`. + // + // NOTE: Versioning is completely real time, and is not affected by the near + // real time aspects of search operations. + // If no version is provided, the operation runs without any version checks. + // + // When using the external version type, the system checks to see if the version + // number passed to the index request is greater than the version of the + // currently stored document. + // If true, the document will be indexed and the new version number used. 
+ // If the value provided is less than or equal to the stored document's version + // number, a version conflict will occur and the index operation will fail. For + // example: + // + // ``` + // PUT my-index-000001/_doc/1?version=2&version_type=external + // { + // "user": { + // "id": "elkbee" + // } + // } + // + // In this example, the operation will succeed since the supplied version of 2 + // is higher than the current document version of 1. + // If the document was already updated and its version was set to 2 or higher, + // the indexing command will fail and result in a conflict (409 HTTP status + // code). + // + // A nice side effect is that there is no need to maintain strict ordering of + // async indexing operations run as a result of changes to a source database, as + // long as version numbers from the source database are used. + // Even the simple case of updating the Elasticsearch index using data from a + // database is simplified if external versioning is used, as only the latest + // version will be used if the index operations arrive out of order. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html Index core_index.NewIndex // Get cluster info. - // Returns basic information about the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html + // Get basic build, version, and cluster information. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/rest-api-root.html Info core_info.NewInfo - // Performs a kNN search. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html + // Run a knn search. + // + // NOTE: The kNN search API has been replaced by the `knn` option in the search + // API. + // + // Perform a k-nearest neighbor (kNN) search on a dense_vector field and return + // the matching documents. + // Given a query vector, the API finds the k closest vectors and returns those + // documents as search hits. 
+ // + // Elasticsearch uses the HNSW algorithm to support efficient kNN search. + // Like most kNN algorithms, HNSW is an approximate method that sacrifices + // result accuracy for improved search speed. + // This means the results returned are not always the true k closest neighbors. + // + // The kNN search API supports restricting the search using a filter. + // The search will return the top k documents that also match the filter query. + // + // A kNN search response has the exact same structure as a search API response. + // However, certain sections have a meaning specific to kNN search: + // + // * The document `_score` is determined by the similarity between the query and + // document vector. + // * The `hits.total` object contains the total number of nearest neighbor + // candidates considered, which is `num_candidates * num_shards`. The + // `hits.total.relation` will always be `eq`, indicating an exact value. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search-api.html KnnSearch core_knn_search.NewKnnSearch - // Allows to get multiple documents in one request. + // Get multiple documents. + // + // Get multiple JSON documents by ID from one or more indices. + // If you specify an index in the request URI, you only need to specify the + // document IDs in the request body. + // To ensure fast responses, this multi get (mget) API responds with partial + // results if one or more shards fail. + // + // **Filter source fields** + // + // By default, the `_source` field is returned for every document (if stored). + // Use the `_source` and `_source_include` or `source_exclude` attributes to + // filter what fields are returned for a particular document. + // You can include the `_source`, `_source_includes`, and `_source_excludes` + // query parameters in the request URI to specify the defaults to use when there + // are no per-document instructions. 
+ // + // **Get stored fields** + // + // Use the `stored_fields` attribute to specify the set of stored fields you + // want to retrieve. + // Any requested fields that are not stored are ignored. + // You can include the `stored_fields` query parameter in the request URI to + // specify the defaults to use when there are no per-document instructions. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html Mget core_mget.NewMget - // Allows to execute several search operations in one request. + // Run multiple searches. + // + // The format of the request is similar to the bulk API format and makes use of + // the newline delimited JSON (NDJSON) format. + // The structure is as follows: + // + // ``` + // header\n + // body\n + // header\n + // body\n + // ``` + // + // This structure is specifically optimized to reduce parsing if a specific + // search ends up redirected to another node. + // + // IMPORTANT: The final line of data must end with a newline character `\n`. + // Each newline character may be preceded by a carriage return `\r`. + // When sending requests to this endpoint the `Content-Type` header should be + // set to `application/x-ndjson`. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html Msearch core_msearch.NewMsearch - // Runs multiple templated searches with a single request. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html + // Run multiple templated searches. + // + // Run multiple templated searches with a single request. + // If you are providing a text file or text input to `curl`, use the + // `--data-binary` flag instead of `-d` to preserve newlines. 
+ // For example: + // + // ``` + // $ cat requests + // { "index": "my-index" } + // { "id": "my-search-template", "params": { "query_string": "hello world", + // "from": 0, "size": 10 }} + // { "index": "my-other-index" } + // { "id": "my-other-search-template", "params": { "query_type": "match_all" }} + // + // $ curl -H "Content-Type: application/x-ndjson" -XGET + // localhost:9200/_msearch/template --data-binary "@requests"; echo + // ``` + // https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-search-template.html MsearchTemplate core_msearch_template.NewMsearchTemplate - // Returns multiple termvectors in one request. + // Get multiple term vectors. + // + // Get multiple term vectors with a single request. + // You can specify existing documents by index and ID or provide artificial + // documents in the body of the request. + // You can specify the index in the request body or request URI. + // The response contains a `docs` array with all the fetched termvectors. + // Each element has the structure provided by the termvectors API. + // + // **Artificial documents** + // + // You can also use `mtermvectors` to generate term vectors for artificial + // documents provided in the body of the request. + // The mapping used is determined by the specified `_index`. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html Mtermvectors core_mtermvectors.NewMtermvectors - // A search request by default executes against the most recent visible data of - // the target indices, + // Open a point in time. + // + // A search request by default runs against the most recent visible data of the + // target indices, // which is called point in time. Elasticsearch pit (point in time) is a // lightweight view into the // state of the data as it existed when initiated. 
In some cases, it’s preferred
@@ -1170,68 +2593,726 @@ type Core struct {
 // `search_after` requests, then the results of those requests might not be
 // consistent as changes happening
 // between searches are only visible to the more recent point in time.
+ //
+ // A point in time must be opened explicitly before being used in search
+ // requests.
+ //
+ // A subsequent search request with the `pit` parameter must not specify
+ // `index`, `routing`, or `preference` values as these parameters are copied
+ // from the point in time.
+ //
+ // Just like regular searches, you can use `from` and `size` to page through
+ // point in time search results, up to the first 10,000 hits.
+ // If you want to retrieve more hits, use PIT with `search_after`.
+ //
+ // IMPORTANT: The open point in time request and each subsequent search request
+ // can return different identifiers; always use the most recently received ID
+ // for the next search request.
+ //
+ // When a PIT that contains shard failures is used in a search request, the
+ // missing shards are always reported in the search response as a
+ // `NoShardAvailableActionException` exception.
+ // To get rid of these exceptions, a new PIT needs to be created so that shards
+ // missing from the previous PIT can be handled, assuming they become available
+ // in the meantime.
+ //
+ // **Keeping point in time alive**
+ //
+ // The `keep_alive` parameter, which is passed to an open point in time request
+ // and search request, extends the time to live of the corresponding point in
+ // time.
+ // The value does not need to be long enough to process all data — it just needs
+ // to be long enough for the next request.
+ //
+ // Normally, the background merge process optimizes the index by merging
+ // together smaller segments to create new, bigger segments.
+ // Once the smaller segments are no longer needed they are deleted.
+ // However, open point-in-times prevent the old segments from being deleted + // since they are still in use. + // + // TIP: Keeping older segments alive means that more disk space and file handles + // are needed. + // Ensure that you have configured your nodes to have ample free file handles. + // + // Additionally, if a segment contains deleted or updated documents then the + // point in time must keep track of whether each document in the segment was + // live at the time of the initial search request. + // Ensure that your nodes have sufficient heap space if you have many open + // point-in-times on an index that is subject to ongoing deletes or updates. + // Note that a point-in-time doesn't prevent its associated indices from being + // deleted. + // You can check how many point-in-times (that is, search contexts) are open + // with the nodes stats API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html OpenPointInTime core_open_point_in_time.NewOpenPointInTime // Ping the cluster. - // Returns whether the cluster is running. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html + // Get information about whether the cluster is running. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html Ping core_ping.NewPing // Create or update a script or search template. // Creates or updates a stored script or search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/guide/en/elasticsearch/reference/current/create-stored-script-api.html PutScript core_put_script.NewPutScript - // Enables you to evaluate the quality of ranked search results over a set of - // typical search queries. + // Evaluate ranked search results. + // + // Evaluate the quality of ranked search results over a set of typical search + // queries. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html RankEval core_rank_eval.NewRankEval // Reindex documents. - // Copies documents from a source to a destination. The source can be any - // existing index, alias, or data stream. The destination must differ from the - // source. For example, you cannot reindex a data stream into itself. + // + // Copy documents from a source to a destination. + // You can copy all documents to the destination index or reindex a subset of + // the documents. + // The source can be any existing index, alias, or data stream. + // The destination must differ from the source. + // For example, you cannot reindex a data stream into itself. + // + // IMPORTANT: Reindex requires `_source` to be enabled for all documents in the + // source. + // The destination should be configured as wanted before calling the reindex + // API. + // Reindex does not copy the settings from the source or its associated + // template. + // Mappings, shard counts, and replicas, for example, must be configured ahead + // of time. + // + // If the Elasticsearch security features are enabled, you must have the + // following security privileges: + // + // * The `read` index privilege for the source data stream, index, or alias. + // * The `write` index privilege for the destination data stream, index, or + // index alias. + // * To automatically create a data stream or index with a reindex API request, + // you must have the `auto_configure`, `create_index`, or `manage` index + // privilege for the destination data stream, index, or alias. + // * If reindexing from a remote cluster, the `source.remote.user` must have the + // `monitor` cluster privilege and the `read` index privilege for the source + // data stream, index, or alias. + // + // If reindexing from a remote cluster, you must explicitly allow the remote + // host in the `reindex.remote.whitelist` setting. 
+ // Automatic data stream creation requires a matching index template with data + // stream enabled. + // + // The `dest` element can be configured like the index API to control optimistic + // concurrency control. + // Omitting `version_type` or setting it to `internal` causes Elasticsearch to + // blindly dump documents into the destination, overwriting any that happen to + // have the same ID. + // + // Setting `version_type` to `external` causes Elasticsearch to preserve the + // `version` from the source, create any documents that are missing, and update + // any documents that have an older version in the destination than they do in + // the source. + // + // Setting `op_type` to `create` causes the reindex API to create only missing + // documents in the destination. + // All existing documents will cause a version conflict. + // + // IMPORTANT: Because data streams are append-only, any reindex request to a + // destination data stream must have an `op_type` of `create`. + // A reindex can only add new documents to a destination data stream. + // It cannot update existing documents in a destination data stream. + // + // By default, version conflicts abort the reindex process. + // To continue reindexing if there are conflicts, set the `conflicts` request + // body property to `proceed`. + // In this case, the response includes a count of the version conflicts that + // were encountered. + // Note that the handling of other error types is unaffected by the `conflicts` + // property. + // Additionally, if you opt to count version conflicts, the operation could + // attempt to reindex more documents from the source than `max_docs` until it + // has successfully indexed `max_docs` documents into the target or it has gone + // through every document in the source query. + // + // NOTE: The reindex API makes no effort to handle ID collisions. 
+ // The last document written will "win" but the order isn't usually predictable + // so it is not a good idea to rely on this behavior. + // Instead, make sure that IDs are unique by using a script. + // + // **Running reindex asynchronously** + // + // If the request contains `wait_for_completion=false`, Elasticsearch performs + // some preflight checks, launches the request, and returns a task you can use + // to cancel or get the status of the task. + // Elasticsearch creates a record of this task as a document at + // `_tasks/`. + // + // **Reindex from multiple sources** + // + // If you have many sources to reindex it is generally better to reindex them + // one at a time rather than using a glob pattern to pick up multiple sources. + // That way you can resume the process if there are any errors by removing the + // partially completed source and starting over. + // It also makes parallelizing the process fairly simple: split the list of + // sources to reindex and run each list in parallel. + // + // For example, you can use a bash script like this: + // + // ``` + // for index in i1 i2 i3 i4 i5; do + // curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty + // -d'{ + // "source": { + // "index": "'$index'" + // }, + // "dest": { + // "index": "'$index'-reindexed" + // } + // }' + // done + // ``` + // + // **Throttling** + // + // Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, + // for example) to throttle the rate at which reindex issues batches of index + // operations. + // Requests are throttled by padding each batch with a wait time. + // To turn off throttling, set `requests_per_second` to `-1`. + // + // The throttling is done by waiting between batches so that the scroll that + // reindex uses internally can be given a timeout that takes into account the + // padding. 
+ // The padding time is the difference between the batch size divided by the + // `requests_per_second` and the time spent writing. + // By default the batch size is `1000`, so if `requests_per_second` is set to + // `500`: + // + // ``` + // target_time = 1000 / 500 per second = 2 seconds + // wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds + // ``` + // + // Since the batch is issued as a single bulk request, large batch sizes cause + // Elasticsearch to create many requests and then wait for a while before + // starting the next set. + // This is "bursty" instead of "smooth". + // + // **Slicing** + // + // Reindex supports sliced scroll to parallelize the reindexing process. + // This parallelization can improve efficiency and provide a convenient way to + // break the request down into smaller parts. + // + // NOTE: Reindexing from remote clusters does not support manual or automatic + // slicing. + // + // You can slice a reindex request manually by providing a slice ID and total + // number of slices to each request. + // You can also let reindex automatically parallelize by using sliced scroll to + // slice on `_id`. + // The `slices` parameter specifies the number of slices to use. + // + // Adding `slices` to the reindex request just automates the manual process, + // creating sub-requests which means it has some quirks: + // + // * You can see these requests in the tasks API. These sub-requests are "child" + // tasks of the task for the request with slices. + // * Fetching the status of the task for the request with `slices` only contains + // the status of completed slices. + // * These sub-requests are individually addressable for things like + // cancellation and rethrottling. + // * Rethrottling the request with `slices` will rethrottle the unfinished + // sub-request proportionally. + // * Canceling the request with `slices` will cancel each sub-request. 
+ // * Due to the nature of `slices`, each sub-request won't get a perfectly even + // portion of the documents. All documents will be addressed, but some slices + // may be larger than others. Expect larger slices to have a more even + // distribution. + // * Parameters like `requests_per_second` and `max_docs` on a request with + // `slices` are distributed proportionally to each sub-request. Combine that + // with the previous point about distribution being uneven and you should + // conclude that using `max_docs` with `slices` might not result in exactly + // `max_docs` documents being reindexed. + // * Each sub-request gets a slightly different snapshot of the source, though + // these are all taken at approximately the same time. + // + // If slicing automatically, setting `slices` to `auto` will choose a reasonable + // number for most indices. + // If slicing manually or otherwise tuning automatic slicing, use the following + // guidelines. + // + // Query performance is most efficient when the number of slices is equal to the + // number of shards in the index. + // If that number is large (for example, `500`), choose a lower number as too + // many slices will hurt performance. + // Setting slices higher than the number of shards generally does not improve + // efficiency and adds overhead. + // + // Indexing performance scales linearly across available resources with the + // number of slices. + // + // Whether query or indexing performance dominates the runtime depends on the + // documents being reindexed and cluster resources. + // + // **Modify documents during reindexing** + // + // Like `_update_by_query`, reindex operations support a script that modifies + // the document. + // Unlike `_update_by_query`, the script is allowed to modify the document's + // metadata. + // + // Just as in `_update_by_query`, you can set `ctx.op` to change the operation + // that is run on the destination. 
+ // For example, set `ctx.op` to `noop` if your script decides that the document + // doesn’t have to be indexed in the destination. This "no operation" will be + // reported in the `noop` counter in the response body. + // Set `ctx.op` to `delete` if your script decides that the document must be + // deleted from the destination. + // The deletion will be reported in the `deleted` counter in the response body. + // Setting `ctx.op` to anything else will return an error, as will setting any + // other field in `ctx`. + // + // Think of the possibilities! Just be careful; you are able to change: + // + // * `_id` + // * `_index` + // * `_version` + // * `_routing` + // + // Setting `_version` to `null` or clearing it from the `ctx` map is just like + // not sending the version in an indexing request. + // It will cause the document to be overwritten in the destination regardless of + // the version on the target or the version type you use in the reindex API. + // + // **Reindex from remote** + // + // Reindex supports reindexing from a remote Elasticsearch cluster. + // The `host` parameter must contain a scheme, host, port, and optional path. + // The `username` and `password` parameters are optional and when they are + // present the reindex operation will connect to the remote Elasticsearch node + // using basic authentication. + // Be sure to use HTTPS when using basic authentication or the password will be + // sent in plain text. + // There are a range of settings available to configure the behavior of the + // HTTPS connection. + // + // When using Elastic Cloud, it is also possible to authenticate against the + // remote cluster through the use of a valid API key. + // Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` + // setting. + // It can be set to a comma delimited list of allowed remote host and port + // combinations. + // Scheme is ignored; only the host and port are used. 
+ // For example:
+ //
+ // ```
+ // reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200,
+ // localhost:*]
+ // ```
+ //
+ // The list of allowed hosts must be configured on any nodes that will
+ // coordinate the reindex.
+ // This feature should work with remote clusters of any version of
+ // Elasticsearch.
+ // This should enable you to upgrade from any version of Elasticsearch to the
+ // current version by reindexing from a cluster of the old version.
+ //
+ // WARNING: Elasticsearch does not support forward compatibility across major
+ // versions.
+ // For example, you cannot reindex from a 7.x cluster into a 6.x cluster.
+ //
+ // To enable queries sent to older versions of Elasticsearch, the `query`
+ // parameter is sent directly to the remote host without validation or
+ // modification.
+ //
+ // NOTE: Reindexing from remote clusters does not support manual or automatic
+ // slicing.
+ //
+ // Reindexing from a remote server uses an on-heap buffer that defaults to a
+ // maximum size of 100mb.
+ // If the remote index includes very large documents you'll need to use a
+ // smaller batch size.
+ // It is also possible to set the socket read timeout on the remote connection
+ // with the `socket_timeout` field and the connection timeout with the
+ // `connect_timeout` field.
+ // Both default to 30 seconds.
+ //
+ // **Configuring SSL parameters**
+ //
+ // Reindex from remote supports configurable SSL settings.
+ // These must be specified in the `elasticsearch.yml` file, with the exception
+ // of the secure settings, which you add in the Elasticsearch keystore.
+ // It is not possible to configure SSL in the body of the reindex request.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html
 Reindex core_reindex.NewReindex
- // Copies documents from a source to a destination.
+ // Throttle a reindex operation.
+ //
+ // Change the number of requests per second for a particular reindex operation.
+ // For example: + // + // ``` + // POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 + // ``` + // + // Rethrottling that speeds up the query takes effect immediately. + // Rethrottling that slows down the query will take effect after completing the + // current batch. + // This behavior prevents scroll timeouts. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html ReindexRethrottle core_reindex_rethrottle.NewReindexRethrottle - // Renders a search template as a search request body. + // Render a search template. + // + // Render a search template as a search request body. // https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-template-api.html RenderSearchTemplate core_render_search_template.NewRenderSearchTemplate // Run a script. + // // Runs a script and returns a result. + // Use this API to build and test scripts, such as when defining a script for a + // runtime field. + // This API requires very few dependencies and is especially useful if you don't + // have permissions to write documents on a cluster. + // + // The API uses several _contexts_, which control how scripts are run, what + // variables are available at runtime, and what the return type is. + // + // Each context requires a script, but additional parameters depend on the + // context you're using for that script. // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html ScriptsPainlessExecute core_scripts_painless_execute.NewScriptsPainlessExecute - // Allows to retrieve a large numbers of results from a single search request. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html#request-body-search-scroll + // Run a scrolling search. + // + // IMPORTANT: The scroll API is no longer recommend for deep pagination. 
If you + // need to preserve the index state while paging through more than 10,000 hits, + // use the `search_after` parameter with a point in time (PIT). + // + // The scroll API gets large sets of results from a single scrolling search + // request. + // To get the necessary scroll ID, submit a search API request that includes an + // argument for the `scroll` query parameter. + // The `scroll` parameter indicates how long Elasticsearch should retain the + // search context for the request. + // The search response returns a scroll ID in the `_scroll_id` response body + // parameter. + // You can then use the scroll ID with the scroll API to retrieve the next batch + // of results for the request. + // If the Elasticsearch security features are enabled, the access to the results + // of a specific scroll ID is restricted to the user or API key that submitted + // the search. + // + // You can also use the scroll API to specify a new scroll parameter that + // extends or shortens the retention period for the search context. + // + // IMPORTANT: Results from a scrolling search reflect the state of the index at + // the time of the initial search request. Subsequent indexing or document + // changes only affect later search and scroll requests. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/scroll-api.html Scroll core_scroll.NewScroll - // Returns search hits that match the query defined in the request. + // Run a search. + // + // Get search hits that match the query defined in the request. // You can provide search queries using the `q` query string parameter or the // request body. // If both are specified, only the query parameter is used. + // + // If the Elasticsearch security features are enabled, you must have the read + // index privilege for the target data stream, index, or alias. For + // cross-cluster search, refer to the documentation about configuring CCS + // privileges. 
+ // To search a point in time (PIT) for an alias, you must have the `read` index + // privilege for the alias's data streams or indices. + // + // **Search slicing** + // + // When paging through a large number of documents, it can be helpful to split + // the search into multiple slices to consume them independently with the + // `slice` and `pit` properties. + // By default the splitting is done first on the shards, then locally on each + // shard. + // The local splitting partitions the shard into contiguous ranges based on + // Lucene document IDs. + // + // For instance if the number of shards is equal to 2 and you request 4 slices, + // the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are + // assigned to the second shard. + // + // IMPORTANT: The same point-in-time ID should be used for all slices. + // If different PIT IDs are used, slices can overlap and miss documents. + // This situation can occur because the splitting criterion is based on Lucene + // document IDs, which are not stable across changes to the index. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html Search core_search.NewSearch // Search a vector tile. - // Searches a vector tile for geospatial values. + // + // Search a vector tile for geospatial values. + // Before using this API, you should be familiar with the Mapbox vector tile + // specification. + // The API returns results as a binary mapbox vector tile. + // + // Internally, Elasticsearch translates a vector tile search API request into a + // search containing: + // + // * A `geo_bounding_box` query on the ``. The query uses the + // `//` tile as a bounding box. + // * A `geotile_grid` or `geohex_grid` aggregation on the ``. The + // `grid_agg` parameter determines the aggregation type. The aggregation uses + // the `//` tile as a bounding box. + // * Optionally, a `geo_bounds` aggregation on the ``. 
The search only + // includes this aggregation if the `exact_bounds` parameter is `true`. + // * If the optional parameter `with_labels` is `true`, the internal search will + // include a dynamic runtime field that calls the `getLabelPosition` function of + // the geometry doc value. This enables the generation of new point features + // containing suggested geometry labels, so that, for example, multi-polygons + // will have only one label. + // + // For example, Elasticsearch may translate a vector tile search API request + // with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of + // `true` into the following search + // + // ``` + // GET my-index/_search + // { + // "size": 10000, + // "query": { + // "geo_bounding_box": { + // "my-geo-field": { + // "top_left": { + // "lat": -40.979898069620134, + // "lon": -45 + // }, + // "bottom_right": { + // "lat": -66.51326044311186, + // "lon": 0 + // } + // } + // } + // }, + // "aggregations": { + // "grid": { + // "geotile_grid": { + // "field": "my-geo-field", + // "precision": 11, + // "size": 65536, + // "bounds": { + // "top_left": { + // "lat": -40.979898069620134, + // "lon": -45 + // }, + // "bottom_right": { + // "lat": -66.51326044311186, + // "lon": 0 + // } + // } + // } + // }, + // "bounds": { + // "geo_bounds": { + // "field": "my-geo-field", + // "wrap_longitude": false + // } + // } + // } + // } + // ``` + // + // The API returns results as a binary Mapbox vector tile. + // Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the + // tile contains three layers: + // + // * A `hits` layer containing a feature for each `` value matching the + // `geo_bounding_box` query. + // * An `aggs` layer containing a feature for each cell of the `geotile_grid` or + // `geohex_grid`. The layer only contains features for cells with matching data. + // * A meta layer containing: + // * A feature containing a bounding box. By default, this is the bounding box + // of the tile. 
+ // * Value ranges for any sub-aggregations on the `geotile_grid` or + // `geohex_grid`. + // * Metadata for the search. + // + // The API only returns features that can display at its zoom level. + // For example, if a polygon feature has no area at its zoom level, the API + // omits it. + // The API returns errors as UTF-8 encoded JSON. + // + // IMPORTANT: You can specify several options for this API as either a query + // parameter or request body parameter. + // If you specify both parameters, the query parameter takes precedence. + // + // **Grid precision for geotile** + // + // For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles + // for lower zoom levels. + // `grid_precision` represents the additional zoom levels available through + // these cells. The final precision is computed by as follows: ` + + // grid_precision`. + // For example, if `` is 7 and `grid_precision` is 8, then the + // `geotile_grid` aggregation will use a precision of 15. + // The maximum final precision is 29. + // The `grid_precision` also determines the number of cells for the grid as + // follows: `(2^grid_precision) x (2^grid_precision)`. + // For example, a value of 8 divides the tile into a grid of 256 x 256 cells. + // The `aggs` layer only contains features for cells with matching data. + // + // **Grid precision for geohex** + // + // For a `grid_agg` of `geohex`, Elasticsearch uses `` and + // `grid_precision` to calculate a final precision as follows: ` + + // grid_precision`. + // + // This precision determines the H3 resolution of the hexagonal cells produced + // by the `geohex` aggregation. + // The following table maps the H3 resolution for each precision. + // For example, if `` is 3 and `grid_precision` is 3, the precision is 6. + // At a precision of 6, hexagonal cells have an H3 resolution of 2. + // If `` is 3 and `grid_precision` is 4, the precision is 7. + // At a precision of 7, hexagonal cells have an H3 resolution of 3. 
+ // + // | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | + // | --------- | ---------------- | ------------- | ----------------| ----- | + // | 1 | 4 | 0 | 122 | 30.5 | + // | 2 | 16 | 0 | 122 | 7.625 | + // | 3 | 64 | 1 | 842 | 13.15625 | + // | 4 | 256 | 1 | 842 | 3.2890625 | + // | 5 | 1024 | 2 | 5882 | 5.744140625 | + // | 6 | 4096 | 2 | 5882 | 1.436035156 | + // | 7 | 16384 | 3 | 41162 | 2.512329102 | + // | 8 | 65536 | 3 | 41162 | 0.6280822754 | + // | 9 | 262144 | 4 | 288122 | 1.099098206 | + // | 10 | 1048576 | 4 | 288122 | 0.2747745514 | + // | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | + // | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | + // | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | + // | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | + // | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | + // | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | + // | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | + // | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | + // | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | + // | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | + // | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | + // | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | + // | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | + // | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | + // | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | + // | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | + // | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | + // | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | + // | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | + // + // Hexagonal cells don't align perfectly on a vector tile. + // Some cells may intersect more than one vector tile. 
+ // To compute the H3 resolution for each precision, Elasticsearch compares the + // average density of hexagonal bins at each resolution with the average density + // of tile bins at each zoom level. + // Elasticsearch uses the H3 resolution that is closest to the corresponding + // geotile density. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-vector-tile-api.html SearchMvt core_search_mvt.NewSearchMvt - // Returns information about the indices and shards that a search request would - // be executed against. + // Get the search shards. + // + // Get the indices and shards that a search request would be run against. + // This information can be useful for working out issues or planning + // optimizations with routing and shard preferences. + // When filtered aliases are used, the filter is returned as part of the + // `indices` section. + // + // If the Elasticsearch security features are enabled, you must have the + // `view_index_metadata` or `manage` index privilege for the target data stream, + // index, or alias. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html SearchShards core_search_shards.NewSearchShards - // Runs a search with a search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html + // Run a search with a search template. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template-api.html SearchTemplate core_search_template.NewSearchTemplate - // The terms enum API can be used to discover terms in the index that begin - // with the provided string. It is designed for low-latency look-ups used in - // auto-complete scenarios. + // Get terms in an index. + // + // Discover terms that match a partial string in an index. + // This API is designed for low-latency look-ups used in auto-complete + // scenarios. + // + // > info + // > The terms enum API may return terms from deleted documents. 
Deleted + // documents are initially only marked as deleted. It is not until their + // segments are merged that documents are actually deleted. Until that happens, + // the terms enum API will return terms from these documents. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-terms-enum.html TermsEnum core_terms_enum.NewTermsEnum // Get term vector information. - // Returns information and statistics about terms in the fields of a particular + // + // Get information and statistics about terms in the fields of a particular // document. + // + // You can retrieve term vectors for documents stored in the index or for + // artificial documents passed in the body of the request. + // You can specify the fields you are interested in through the `fields` + // parameter or by adding the fields to the request body. + // For example: + // + // ``` + // GET /my-index-000001/_termvectors/1?fields=message + // ``` + // + // Fields can be specified using wildcards, similar to the multi match query. + // + // Term vectors are real-time by default, not near real-time. + // This can be changed by setting `realtime` parameter to `false`. + // + // You can request three types of values: _term information_, _term statistics_, + // and _field statistics_. + // By default, all term information and field statistics are returned for all + // fields but term statistics are excluded. + // + // **Term information** + // + // * term frequency in the field (always returned) + // * term positions (`positions: true`) + // * start and end offsets (`offsets: true`) + // * term payloads (`payloads: true`), as base64 encoded bytes + // + // If the requested information wasn't stored in the index, it will be computed + // on the fly if possible. + // Additionally, term vectors could be computed for documents not even existing + // in the index, but instead provided by the user. + // + // > warn + // > Start and end offsets assume UTF-16 encoding is being used. 
If you want to + // use these offsets in order to get the original text that produced this token, + // you should make sure that the string you are taking a sub-string of is also + // encoded using UTF-16. + // + // **Behaviour** + // + // The term and field statistics are not accurate. + // Deleted documents are not taken into account. + // The information is only retrieved for the shard the requested document + // resides in. + // The term and field statistics are therefore only useful as relative measures + // whereas the absolute numbers have no meaning in this context. + // By default, when requesting term vectors of artificial documents, a shard to + // get the statistics from is randomly selected. + // Use `routing` only to hit a particular shard. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-termvectors.html Termvectors core_termvectors.NewTermvectors // Update a document. - // Updates a document by running a script or passing a partial document. + // + // Update a document by running a script or passing a partial document. + // + // If the Elasticsearch security features are enabled, you must have the `index` + // or `write` index privilege for the target index or index alias. + // + // The script can update, delete, or skip modifying the document. + // The API also supports passing a partial document, which is merged into the + // existing document. + // To fully replace an existing document, use the index API. + // This operation: + // + // * Gets the document (collocated with the shard) from the index. + // * Runs the specified script. + // * Indexes the result. + // + // The document must still be reindexed, but using this API removes some network + // roundtrips and reduces chances of version conflicts between the GET and the + // index operation. + // + // The `_source` field must be enabled to use this API. 
+ // In addition to `_source`, you can access the following variables through the + // `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the + // current timestamp). // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html Update core_update.NewUpdate // Update documents. @@ -1239,23 +3320,179 @@ type Core struct { // If no query is specified, performs an update on every document in the data // stream or index without modifying the source, which is useful for picking up // mapping changes. + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or alias: + // + // * `read` + // * `index` or `write` + // + // You can specify the query criteria in the request URI or the request body + // using the same syntax as the search API. + // + // When you submit an update by query request, Elasticsearch gets a snapshot of + // the data stream or index when it begins processing the request and updates + // matching documents using internal versioning. + // When the versions match, the document is updated and the version number is + // incremented. + // If a document changes between the time that the snapshot is taken and the + // update operation is processed, it results in a version conflict and the + // operation fails. + // You can opt to count version conflicts instead of halting and returning by + // setting `conflicts` to `proceed`. + // Note that if you opt to count version conflicts, the operation could attempt + // to update more documents from the source than `max_docs` until it has + // successfully updated `max_docs` documents or it has gone through every + // document in the source query. + // + // NOTE: Documents with a version equal to 0 cannot be updated using update by + // query because internal versioning does not support 0 as a valid version + // number. 
+ // + // While processing an update by query request, Elasticsearch performs multiple + // search requests sequentially to find all of the matching documents. + // A bulk update request is performed for each batch of matching documents. + // Any query or update failures cause the update by query request to fail and + // the failures are shown in the response. + // Any update requests that completed successfully still stick, they are not + // rolled back. + // + // **Throttling update requests** + // + // To control the rate at which update by query issues batches of update + // operations, you can set `requests_per_second` to any positive decimal number. + // This pads each batch with a wait time to throttle the rate. + // Set `requests_per_second` to `-1` to turn off throttling. + // + // Throttling uses a wait time between batches so that the internal scroll + // requests can be given a timeout that takes the request padding into account. + // The padding time is the difference between the batch size divided by the + // `requests_per_second` and the time spent writing. + // By default the batch size is 1000, so if `requests_per_second` is set to + // `500`: + // + // ``` + // target_time = 1000 / 500 per second = 2 seconds + // wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds + // ``` + // + // Since the batch is issued as a single _bulk request, large batch sizes cause + // Elasticsearch to create many requests and wait before starting the next set. + // This is "bursty" instead of "smooth". + // + // **Slicing** + // + // Update by query supports sliced scroll to parallelize the update process. + // This can improve efficiency and provide a convenient way to break the request + // down into smaller parts. + // + // Setting `slices` to `auto` chooses a reasonable number for most data streams + // and indices. + // This setting will use one slice per shard, up to a certain limit. 
+ // If there are multiple source data streams or indices, it will choose the + // number of slices based on the index or backing index with the smallest number + // of shards. + // + // Adding `slices` to `_update_by_query` just automates the manual process of + // creating sub-requests, which means it has some quirks: + // + // * You can see these requests in the tasks APIs. These sub-requests are + // "child" tasks of the task for the request with slices. + // * Fetching the status of the task for the request with `slices` only contains + // the status of completed slices. + // * These sub-requests are individually addressable for things like + // cancellation and rethrottling. + // * Rethrottling the request with `slices` will rethrottle the unfinished + // sub-request proportionally. + // * Canceling the request with slices will cancel each sub-request. + // * Due to the nature of slices each sub-request won't get a perfectly even + // portion of the documents. All documents will be addressed, but some slices + // may be larger than others. Expect larger slices to have a more even + // distribution. + // * Parameters like `requests_per_second` and `max_docs` on a request with + // slices are distributed proportionally to each sub-request. Combine that with + // the point above about distribution being uneven and you should conclude that + // using `max_docs` with `slices` might not result in exactly `max_docs` + // documents being updated. + // * Each sub-request gets a slightly different snapshot of the source data + // stream or index though these are all taken at approximately the same time. + // + // If you're slicing manually or otherwise tuning automatic slicing, keep in + // mind that: + // + // * Query performance is most efficient when the number of slices is equal to + // the number of shards in the index or backing index. If that number is large + // (for example, 500), choose a lower number as too many slices hurts + // performance. 
Setting slices higher than the number of shards generally does + // not improve efficiency and adds overhead. + // * Update performance scales linearly across available resources with the + // number of slices. + // + // Whether query or update performance dominates the runtime depends on the + // documents being reindexed and cluster resources. + // + // **Update the document source** + // + // Update by query supports scripts to update the document source. + // As with the update API, you can set `ctx.op` to change the operation that is + // performed. + // + // Set `ctx.op = "noop"` if your script decides that it doesn't have to make any + // changes. + // The update by query operation skips updating the document and increments the + // `noop` counter. + // + // Set `ctx.op = "delete"` if your script decides that the document should be + // deleted. + // The update by query operation deletes the document and increments the + // `deleted` counter. + // + // Update by query supports only `index`, `noop`, and `delete`. + // Setting `ctx.op` to anything else is an error. + // Setting any other field in `ctx` is an error. + // This API enables you to only modify the source of matching documents; you + // cannot move them. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html UpdateByQuery core_update_by_query.NewUpdateByQuery - // Changes the number of requests per second for a particular Update By Query + // Throttle an update by query operation. + // + // Change the number of requests per second for a particular update by query // operation. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html + // Rethrottling that speeds up the query takes effect immediately but + // rethrottling that slows down the query takes effect after completing the + // current batch to prevent scroll timeouts. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html#docs-update-by-query-rethrottle UpdateByQueryRethrottle core_update_by_query_rethrottle.NewUpdateByQueryRethrottle } type DanglingIndices struct { - // Deletes the specified dangling index - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html + // Delete a dangling index. + // If Elasticsearch encounters index data that is absent from the current + // cluster state, those indices are considered to be dangling. + // For example, this can happen if you delete more than + // `cluster.indices.tombstones.size` indices while an Elasticsearch node is + // offline. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/dangling-index-delete.html DeleteDanglingIndex dangling_indices_delete_dangling_index.NewDeleteDanglingIndex - // Imports the specified dangling index - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html + // Import a dangling index. + // + // If Elasticsearch encounters index data that is absent from the current + // cluster state, those indices are considered to be dangling. + // For example, this can happen if you delete more than + // `cluster.indices.tombstones.size` indices while an Elasticsearch node is + // offline. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/dangling-index-import.html ImportDanglingIndex dangling_indices_import_dangling_index.NewImportDanglingIndex - // Returns all dangling indices. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html + // Get the dangling indices. + // + // If Elasticsearch encounters index data that is absent from the current + // cluster state, those indices are considered to be dangling. 
+ // For example, this can happen if you delete more than + // `cluster.indices.tombstones.size` indices while an Elasticsearch node is + // offline. + // + // Use this API to list dangling indices, which you can then import or delete. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/dangling-indices-list.html ListDanglingIndices dangling_indices_list_dangling_indices.NewListDanglingIndices } @@ -1264,7 +3501,8 @@ type Enrich struct { // Deletes an existing enrich policy and its enrich index. // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-enrich-policy-api.html DeletePolicy enrich_delete_policy.NewDeletePolicy - // Creates the enrich index for an existing enrich policy. + // Run an enrich policy. + // Create the enrich index for an existing enrich policy. // https://www.elastic.co/guide/en/elasticsearch/reference/current/execute-enrich-policy-api.html ExecutePolicy enrich_execute_policy.NewExecutePolicy // Get an enrich policy. @@ -1283,45 +3521,123 @@ type Enrich struct { } type Eql struct { - // Deletes an async EQL search or a stored synchronous EQL search. + // Delete an async EQL search. + // Delete an async EQL search or a stored synchronous EQL search. // The API also deletes results for the search. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html + // https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-eql-delete Delete eql_delete.NewDelete - // Returns the current status and available results for an async EQL search or a + // Get async EQL search results. + // Get the current status and available results for an async EQL search or a // stored synchronous EQL search. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-eql-search-api.html Get eql_get.NewGet - // Returns the current status for an async EQL search or a stored synchronous - // EQL search without returning results. + // Get the async EQL status. 
+ // Get the current status for an async EQL search or a stored synchronous EQL + // search without returning results. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-eql-status-api.html GetStatus eql_get_status.NewGetStatus - // Returns results matching a query expressed in Event Query Language (EQL) + // Get EQL search results. + // Returns search results for an Event Query Language (EQL) query. + // EQL assumes each document in a data stream or index corresponds to an event. // https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html Search eql_search.NewSearch } type Esql struct { - // Executes an ESQL request asynchronously + // Run an async ES|QL query. + // Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its + // progress, and retrieve results when they become available. + // + // The API accepts the same parameters and request body as the synchronous query + // API, along with additional async related properties. // https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-api.html AsyncQuery esql_async_query.NewAsyncQuery - // Executes an ES|QL request + // Delete an async ES|QL query. + // If the query is still running, it is cancelled. + // Otherwise, the stored results are deleted. + // + // If the Elasticsearch security features are enabled, only the following users + // can use this API to delete a query: + // + // * The authenticated user that submitted the original query request + // * Users with the `cancel_task` cluster privilege + // https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-delete-api.html + AsyncQueryDelete esql_async_query_delete.NewAsyncQueryDelete + // Get async ES|QL query results. + // Get the current status and available results or stored results for an ES|QL + // asynchronous query. 
+ // If the Elasticsearch security features are enabled, only the user who first + // submitted the ES|QL query can retrieve the results using this API. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-get-api.html + AsyncQueryGet esql_async_query_get.NewAsyncQueryGet + // Stop async ES|QL query. + // + // This API interrupts the query execution and returns the results so far. + // If the Elasticsearch security features are enabled, only the user who first + // submitted the ES|QL query can stop it. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-stop-api.html + AsyncQueryStop esql_async_query_stop.NewAsyncQueryStop + // Run an ES|QL query. + // Get search results for an ES|QL (Elasticsearch query language) query. // https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-rest.html Query esql_query.NewQuery } type Features struct { - // Gets a list of features which can be included in snapshots using the - // feature_states field when creating a snapshot + // Get the features. + // Get a list of features that can be included in snapshots using the + // `feature_states` field when creating a snapshot. + // You can use this API to determine which feature states to include when taking + // a snapshot. + // By default, all feature states are included in a snapshot if that snapshot + // includes the global state, or none if it does not. + // + // A feature state includes one or more system indices necessary for a given + // feature to function. + // In order to ensure data integrity, all system indices that comprise a feature + // state are snapshotted and restored together. + // + // The features listed by this API are a combination of built-in features and + // features defined by plugins. 
+ // In order for a feature state to be listed in this API and recognized as a + // valid feature state by the create snapshot API, the plugin that defines that + // feature must be installed on the master node. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-features-api.html GetFeatures features_get_features.NewGetFeatures - // Resets the internal state of features, usually by deleting system indices - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Reset the features. + // Clear all of the state information stored in system indices by Elasticsearch + // features, including the security and machine learning indices. + // + // WARNING: Intended for development and testing use only. Do not reset features + // on a production cluster. + // + // Return a cluster to the same state as a new installation by resetting the + // feature state for all Elasticsearch features. + // This deletes all state information stored in system indices. + // + // The response code is HTTP 200 if the state is successfully reset for all + // features. + // It is HTTP 500 if the reset operation failed for any feature. + // + // Note that select features might provide a way to reset particular system + // indices. + // Using this API resets all features, both those that are built-in and + // implemented as plugins. + // + // To list the features that will be affected, use the get features API. + // + // IMPORTANT: The features installed on the node you submit this request to are + // the features that will be reset. Run on the master node if you have any + // doubts about which plugins are installed on individual nodes. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-features-api.html ResetFeatures features_reset_features.NewResetFeatures } type Fleet struct { - // Returns the current global checkpoints for an index. This API is design for - // internal use by the fleet server project. 
+ // Get global checkpoints. + // + // Get the current global checkpoints for an index. + // This API is designed for internal use by the Fleet server project. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-global-checkpoints.html GlobalCheckpoints fleet_global_checkpoints.NewGlobalCheckpoints // Executes several [fleet @@ -1331,7 +3647,7 @@ type Fleet struct { // search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) // API. However, similar to the fleet search API, it // supports the wait_for_checkpoints parameter. - // + // https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-multi-search.html Msearch fleet_msearch.NewMsearch // Creates a secret stored by Fleet. // @@ -1340,104 +3656,346 @@ type Fleet struct { // search will only be executed // after provided checkpoint has been processed and is visible for searches // inside of Elasticsearch. - // + // https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html Search fleet_search.NewSearch } type Graph struct { - // Extracts and summarizes information about the documents and terms in an + // Explore graph analytics. + // Extract and summarize information about the documents and terms in an // Elasticsearch data stream or index. + // The easiest way to understand the behavior of this API is to use the Graph UI + // to explore connections. + // An initial request to the `_explore` API contains a seed query that + // identifies the documents of interest and specifies the fields that define the + // vertices and connections you want to include in the graph. + // Subsequent requests enable you to spider out from one or more vertices of + // interest. + // You can exclude vertices that have already been returned. // https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html Explore graph_explore.NewExplore } type Ilm struct { - // Deletes the specified lifecycle policy definition. 
You cannot delete policies - // that are currently in use. If the policy is being used to manage any indices, - // the request fails and returns an error. + // Delete a lifecycle policy. + // You cannot delete policies that are currently in use. If the policy is being + // used to manage any indices, the request fails and returns an error. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html DeleteLifecycle ilm_delete_lifecycle.NewDeleteLifecycle - // Retrieves information about the index’s current lifecycle state, such as the - // currently executing phase, action, and step. Shows when the index entered - // each one, the definition of the running phase, and information about any - // failures. + // Explain the lifecycle state. + // Get the current lifecycle status for one or more indices. + // For data streams, the API retrieves the current lifecycle status for the + // stream's backing indices. + // + // The response indicates when the index entered each lifecycle state, provides + // the definition of the running phase, and information about any failures. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-explain-lifecycle.html ExplainLifecycle ilm_explain_lifecycle.NewExplainLifecycle - // Retrieves a lifecycle policy. + // Get lifecycle policies. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycle.html GetLifecycle ilm_get_lifecycle.NewGetLifecycle - // Retrieves the current index lifecycle management (ILM) status. + // Get the ILM status. + // + // Get the current index lifecycle management status. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-status.html GetStatus ilm_get_status.NewGetStatus - // Switches the indices, ILM policies, and legacy, composable and component - // templates from using custom node attributes and - // attribute-based allocation filters to using data tiers, and optionally - // deletes one legacy index template.+ + // Migrate to data tiers routing. + // Switch the indices, ILM policies, and legacy, composable, and component + // templates from using custom node attributes and attribute-based allocation + // filters to using data tiers. + // Optionally, delete one legacy index template. // Using node roles enables ILM to automatically move the indices between data // tiers. + // + // Migrating away from custom node attributes routing can be manually performed. + // This API provides an automated way of performing three out of the four manual + // steps listed in the migration guide: + // + // 1. Stop setting the custom hot attribute on new indices. + // 1. Remove custom allocation settings from existing ILM policies. + // 1. Replace custom allocation settings from existing indices with the + // corresponding tier preference. + // + // ILM must be stopped before performing the migration. + // Use the stop ILM and get ILM status APIs to wait until the reported operation + // mode is `STOPPED`. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate-to-data-tiers.html MigrateToDataTiers ilm_migrate_to_data_tiers.NewMigrateToDataTiers - // Manually moves an index into the specified step and executes that step. + // Move to a lifecycle step. + // Manually move an index into a specific step in the lifecycle policy and run + // that step. + // + // WARNING: This operation can result in the loss of data. Manually moving an + // index into a specific step runs that step even if it has already been + // performed. 
This is a potentially destructive action and this should be + // considered an expert level API. + // + // You must specify both the current step and the step to be executed in the + // body of the request. + // The request will fail if the current step does not match the step currently + // running for the index + // This is to prevent the index from being moved from an unexpected step into + // the next step. + // + // When specifying the target (`next_step`) to which the index will be moved, + // either the name or both the action and name fields are optional. + // If only the phase is specified, the index will move to the first step of the + // first action in the target phase. + // If the phase and action are specified, the index will move to the first step + // of the specified action in the specified phase. + // Only actions specified in the ILM policy are considered valid. + // An index cannot move to a step that is not part of its policy. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-move-to-step.html MoveToStep ilm_move_to_step.NewMoveToStep - // Creates a lifecycle policy. If the specified policy exists, the policy is - // replaced and the policy version is incremented. + // Create or update a lifecycle policy. + // If the specified policy exists, it is replaced and the policy version is + // incremented. + // + // NOTE: Only the latest version of the policy is stored, you cannot revert to + // previous versions. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-put-lifecycle.html PutLifecycle ilm_put_lifecycle.NewPutLifecycle - // Removes the assigned lifecycle policy and stops managing the specified index + // Remove policies from an index. + // Remove the assigned lifecycle policies from an index or a data stream's + // backing indices. + // It also stops managing the indices. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html RemovePolicy ilm_remove_policy.NewRemovePolicy - // Retries executing the policy for an index that is in the ERROR step. + // Retry a policy. + // Retry running the lifecycle policy for an index that is in the ERROR step. + // The API sets the policy back to the step where the error occurred and runs + // the step. + // Use the explain lifecycle state API to determine whether an index is in the + // ERROR step. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-retry-policy.html Retry ilm_retry.NewRetry - // Start the index lifecycle management (ILM) plugin. + // Start the ILM plugin. + // Start the index lifecycle management plugin if it is currently stopped. + // ILM is started automatically when the cluster is formed. + // Restarting ILM is necessary only when it has been stopped using the stop ILM + // API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-start.html Start ilm_start.NewStart - // Halts all lifecycle management operations and stops the index lifecycle - // management (ILM) plugin + // Stop the ILM plugin. + // Halt all lifecycle management operations and stop the index lifecycle + // management plugin. + // This is useful when you are performing maintenance on the cluster and need to + // prevent ILM from performing any actions on your indices. + // + // The API returns as soon as the stop request has been acknowledged, but the + // plugin might continue to run until in-progress operations complete and the + // plugin can be safely stopped. + // Use the get ILM status API to check whether ILM is running. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-stop.html Stop ilm_stop.NewStop } type Indices struct { // Add an index block. - // Limits the operations allowed on an index by blocking specific operation - // types. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-blocks.html + // + // Add an index block to an index. + // Index blocks limit the operations allowed on an index by blocking specific + // operation types. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-blocks.html#add-index-block AddBlock indices_add_block.NewAddBlock - // Performs analysis on a text string and returns the resulting tokens. + // Get tokens from text analysis. + // The analyze API performs analysis on a text string and returns the resulting + // tokens. + // + // Generating an excessive amount of tokens may cause a node to run out of memory. + // The `index.analyze.max_token_count` setting enables you to limit the number + // of tokens that can be produced. + // If more than this limit of tokens gets generated, an error occurs. + // The `_analyze` endpoint without a specified index will always use `10000` as + // its limit. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-analyze.html Analyze indices_analyze.NewAnalyze - // Clears the caches of one or more indices. - // For data streams, the API clears the caches of the stream’s backing indices. + // Cancel a migration reindex operation. + // + // Cancel a migration reindex attempt for a data stream or index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html + CancelMigrateReindex indices_cancel_migrate_reindex.NewCancelMigrateReindex + // Clear the cache. + // Clear the cache of one or more indices. + // For data streams, the API clears the caches of the stream's backing indices. + // + // By default, the clear cache API clears all caches. + // To clear only specific caches, use the `fielddata`, `query`, or `request` + // parameters. + // To clear the cache only of specific fields, use the `fields` parameter. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clearcache.html ClearCache indices_clear_cache.NewClearCache - // Clones an existing index. + // Clone an index. + // Clone an existing index into a new index. + // Each original primary shard is cloned into a new primary shard in the new + // index. + // + // IMPORTANT: Elasticsearch does not apply index templates to the resulting + // index. + // The API also does not copy index metadata from the original index. + // Index metadata includes aliases, index lifecycle management phase + // definitions, and cross-cluster replication (CCR) follower information. + // For example, if you clone a CCR follower index, the resulting clone will not + // be a follower index. + // + // The clone API copies most index settings from the source index to the + // resulting index, with the exception of `index.number_of_replicas` and + // `index.auto_expand_replicas`. + // To set the number of replicas in the resulting index, configure these + // settings in the clone request. + // + // Cloning works as follows: + // + // * First, it creates a new target index with the same definition as the source + // index. + // * Then it hard-links segments from the source index into the target index. If + // the file system does not support hard-linking, all segments are copied into + // the new index, which is a much more time consuming process. + // * Finally, it recovers the target index as though it were a closed index + // which had just been re-opened. + // + // IMPORTANT: Indices can only be cloned if they meet the following + // requirements: + // + // * The index must be marked as read-only and have a cluster health status of + // green. + // * The target index must not exist. + // * The source index must have the same number of primary shards as the target + // index. + // * The node handling the clone process must have sufficient free disk space to + // accommodate a second copy of the existing index. 
+ // + // The current write index on a data stream cannot be cloned. + // In order to clone the current write index, the data stream must first be + // rolled over so that a new write index is created and then the previous write + // index can be cloned. + // + // NOTE: Mappings cannot be specified in the `_clone` request. The mappings of + // the source index will be used for the target index. + // + // **Monitor the cloning process** + // + // The cloning process can be monitored with the cat recovery API or the cluster + // health API can be used to wait until all primary shards have been allocated + // by setting the `wait_for_status` parameter to `yellow`. + // + // The `_clone` API returns as soon as the target index has been added to the + // cluster state, before any shards have been allocated. + // At this point, all shards are in the state unassigned. + // If, for any reason, the target index can't be allocated, its primary shard + // will remain unassigned until it can be allocated on that node. + // + // Once the primary shard is allocated, it moves to state initializing, and the + // clone process begins. + // When the clone operation completes, the shard will become active. + // At that point, Elasticsearch will try to allocate any replicas and may decide + // to relocate the primary shard to another node. + // + // **Wait for active shards** + // + // Because the clone operation creates a new index to clone the shards to, the + // wait for active shards setting on index creation applies to the clone index + // action as well. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clone-index.html Clone indices_clone.NewClone - // Closes an index. + // Close an index. + // A closed index is blocked for read or write operations and does not allow all + // operations that opened indices allow. + // It is not possible to index documents or to search for documents in a closed + // index. 
+ // Closed indices do not have to maintain internal data structures for indexing + // or searching documents, which results in a smaller overhead on the cluster. + // + // When opening or closing an index, the master node is responsible for + // restarting the index shards to reflect the new state of the index. + // The shards will then go through the normal recovery process. + // The data of opened and closed indices is automatically replicated by the + // cluster to ensure that enough shard copies are safely kept around at all + // times. + // + // You can open and close multiple indices. + // An error is thrown if the request explicitly refers to a missing index. + // This behaviour can be turned off using the `ignore_unavailable=true` + // parameter. + // + // By default, you must explicitly name the indices you are opening or closing. + // To open or close indices with `_all`, `*`, or other wildcard expressions, + // change the` action.destructive_requires_name` setting to `false`. This + // setting can also be changed with the cluster update settings API. + // + // Closed indices consume a significant amount of disk-space which can cause + // problems in managed environments. + // Closing indices can be turned off with the cluster settings API by setting + // `cluster.indices.close.enable` to `false`. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-close.html Close indices_close.NewClose // Create an index. - // Creates a new index. + // You can use the create index API to add a new index to an Elasticsearch + // cluster. + // When creating an index, you can specify the following: + // + // * Settings for the index. + // * Mappings for fields in the index. + // * Index aliases + // + // **Wait for active shards** + // + // By default, index creation will only return a response to the client when the + // primary copies of each shard have been started, or the request times out. 
+ // The index creation response will indicate what happened. + // For example, `acknowledged` indicates whether the index was successfully + // created in the cluster, while `shards_acknowledged` indicates whether the + // requisite number of shard copies were started for each shard in the index + // before timing out. + // Note that it is still possible for either `acknowledged` or + // `shards_acknowledged` to be `false`, but for the index creation to be + // successful. + // These values simply indicate whether the operation completed before the + // timeout. + // If `acknowledged` is false, the request timed out before the cluster state + // was updated with the newly created index, but it probably will be created + // sometime soon. + // If `shards_acknowledged` is false, then the request timed out before the + // requisite number of shards were started (by default just the primaries), even + // if the cluster state was successfully updated to reflect the newly created + // index (that is to say, `acknowledged` is `true`). + // + // You can change the default of only waiting for the primary shards to start + // through the index setting `index.write.wait_for_active_shards`. + // Note that changing this setting will also affect the `wait_for_active_shards` + // value on all subsequent write operations. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html Create indices_create.NewCreate // Create a data stream. - // Creates a data stream. + // // You must have a matching index template with data stream enabled. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-data-stream.html CreateDataStream indices_create_data_stream.NewCreateDataStream + // Create an index from a source index. 
+ // + // Copy the mappings and settings from the source index to a destination index + // while allowing request settings and mappings to override the source values. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html + CreateFrom indices_create_from.NewCreateFrom // Get data stream stats. - // Retrieves statistics for one or more data streams. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // + // Get statistics for one or more data streams. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-stream-stats-api.html DataStreamsStats indices_data_streams_stats.NewDataStreamsStats // Delete indices. - // Deletes one or more indices. + // Deleting an index deletes its documents, shards, and metadata. + // It does not delete related Kibana components, such as data views, + // visualizations, or dashboards. + // + // You cannot delete the current write index of a data stream. + // To delete the index, you must roll over the data stream so a new write index + // is created. + // You can then use the delete index API to delete the previous write index. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html Delete indices_delete.NewDelete // Delete an alias. // Removes a data stream or index from an alias. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-alias.html DeleteAlias indices_delete_alias.NewDeleteAlias // Delete data stream lifecycles. // Removes the data stream lifecycle from a data stream, rendering it not @@ -1446,7 +4004,7 @@ type Indices struct { DeleteDataLifecycle indices_delete_data_lifecycle.NewDeleteDataLifecycle // Delete data streams. // Deletes one or more data streams and their backing indices. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-data-stream.html DeleteDataStream indices_delete_data_stream.NewDeleteDataStream // Delete an index template. // The provided may contain multiple template names separated @@ -1456,92 +4014,263 @@ type Indices struct { // existing templates. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-template.html DeleteIndexTemplate indices_delete_index_template.NewDeleteIndexTemplate - // Deletes a legacy index template. + // Delete a legacy index template. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-template-v1.html DeleteTemplate indices_delete_template.NewDeleteTemplate - // Analyzes the disk usage of each field of an index or data stream. + // Analyze the index disk usage. + // Analyze the disk usage of each field of an index or data stream. + // This API might not support indices created in previous Elasticsearch + // versions. + // The result of a small index can be inaccurate as some parts of an index might + // not be analyzed by the API. + // + // NOTE: The total size of fields of the analyzed shards of the index in the + // response is usually smaller than the index `store_size` value because some + // small metadata files are ignored and some parts of data files might not be + // scanned by the API. + // Since stored fields are stored together in a compressed format, the sizes of + // stored fields are also estimates and can be inaccurate. + // The stored size of the `_id` field is likely underestimated while the + // `_source` field is overestimated. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-disk-usage.html DiskUsage indices_disk_usage.NewDiskUsage - // Aggregates a time series (TSDS) index and stores pre-computed statistical + // Downsample an index. 
+ // Aggregate a time series (TSDS) index and store pre-computed statistical // summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric // field grouped by a configured time interval. + // For example, a TSDS index that contains metrics sampled every 10 seconds can + // be downsampled to an hourly index. + // All documents within an hour interval are summarized and stored as a single + // document in the downsample index. + // + // NOTE: Only indices in a time series data stream are supported. + // Neither field nor document level security can be defined on the source index. + // The source index must be read only (`index.blocks.write: true`). // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-downsample-data-stream.html Downsample indices_downsample.NewDownsample // Check indices. - // Checks if one or more indices, index aliases, or data streams exist. + // Check if one or more indices, index aliases, or data streams exist. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html Exists indices_exists.NewExists // Check aliases. - // Checks if one or more data stream or index aliases exist. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html + // + // Check if one or more data stream or index aliases exist. + // https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-alias ExistsAlias indices_exists_alias.NewExistsAlias - // Returns information about whether a particular index template exists. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html + // Check index templates. + // + // Check whether index templates exist. + // https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-index-template ExistsIndexTemplate indices_exists_index_template.NewExistsIndexTemplate // Check existence of index templates. 
- // Returns information about whether a particular index template exists. + // Get information about whether index templates exist. + // Index templates define settings, mappings, and aliases that can be applied + // automatically to new indices. + // + // IMPORTANT: This documentation is about legacy index templates, which are + // deprecated and will be replaced by the composable templates introduced in + // Elasticsearch 7.8. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-template-exists-v1.html ExistsTemplate indices_exists_template.NewExistsTemplate // Get the status for a data stream lifecycle. - // Retrieves information about an index or data stream’s current data stream - // lifecycle status, such as time since index creation, time since rollover, the - // lifecycle configuration managing the index, or any errors encountered during - // lifecycle execution. + // Get information about an index or data stream's current data stream lifecycle + // status, such as time since index creation, time since rollover, the lifecycle + // configuration managing the index, or any errors encountered during lifecycle + // execution. // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-explain-lifecycle.html ExplainDataLifecycle indices_explain_data_lifecycle.NewExplainDataLifecycle - // Returns field usage information for each shard and field of an index. + // Get field usage stats. + // Get field usage information for each shard and field of an index. + // Field usage statistics are automatically captured when queries are running on + // a cluster. + // A shard-level search request that accesses a given field, even if multiple + // times during that request, is counted as a single use. + // + // The response body reports the per-shard usage count of the data structures + // that back the fields in the index. 
+ // A given request will increment each count by a maximum value of 1, even if + // the request accesses the same field multiple times. // https://www.elastic.co/guide/en/elasticsearch/reference/current/field-usage-stats.html FieldUsageStats indices_field_usage_stats.NewFieldUsageStats - // Flushes one or more data streams or indices. + // Flush data streams or indices. + // Flushing a data stream or index is the process of making sure that any data + // that is currently only stored in the transaction log is also permanently + // stored in the Lucene index. + // When restarting, Elasticsearch replays any unflushed operations from the + // transaction log into the Lucene index to bring it back into the state that it + // was in before the restart. + // Elasticsearch automatically triggers flushes as needed, using heuristics that + // trade off the size of the unflushed transaction log against the cost of + // performing each flush. + // + // After each operation has been flushed it is permanently stored in the Lucene + // index. + // This may mean that there is no need to maintain an additional copy of it in + // the transaction log. + // The transaction log is made up of multiple files, called generations, and + // Elasticsearch will delete any generation files when they are no longer + // needed, freeing up disk space. + // + // It is also possible to trigger a flush on one or more indices using the flush + // API, although it is rare for users to need to call this API directly. + // If you call the flush API after indexing some documents then a successful + // response indicates that Elasticsearch has flushed all the documents that were + // indexed before the flush API was called. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html Flush indices_flush.NewFlush - // Performs the force merge operation on one or more indices. + // Force a merge. + // Perform the force merge operation on the shards of one or more indices. 
+ // For data streams, the API forces a merge on the shards of the stream's + // backing indices. + // + // Merging reduces the number of segments in each shard by merging some of them + // together and also frees up the space used by deleted documents. + // Merging normally happens automatically, but sometimes it is useful to trigger + // a merge manually. + // + // WARNING: We recommend force merging only a read-only index (meaning the index + // is no longer receiving writes). + // When documents are updated or deleted, the old version is not immediately + // removed but instead soft-deleted and marked with a "tombstone". + // These soft-deleted documents are automatically cleaned up during regular + // segment merges. + // But force merge can cause very large (greater than 5 GB) segments to be + // produced, which are not eligible for regular merges. + // So the number of soft-deleted documents can then grow rapidly, resulting in + // higher disk usage and worse search performance. + // If you regularly force merge an index receiving writes, this can also make + // snapshots more expensive, since the new documents can't be backed up + // incrementally. + // + // **Blocks during a force merge** + // + // Calls to this API block until the merge is complete (unless request contains + // `wait_for_completion=false`). + // If the client connection is lost before completion then the force merge + // process will continue in the background. + // Any new requests to force merge the same indices will also block until the + // ongoing force merge is complete. + // + // **Running force merge asynchronously** + // + // If the request contains `wait_for_completion=false`, Elasticsearch performs + // some preflight checks, launches the request, and returns a task you can use + // to get the status of the task. + // However, you can not cancel this task as the force merge task is not + // cancelable. 
+ // Elasticsearch creates a record of this task as a document at + // `_tasks/`. + // When you are done with a task, you should delete the task document so + // Elasticsearch can reclaim the space. + // + // **Force merging multiple indices** + // + // You can force merge multiple indices with a single request by targeting: + // + // * One or more data streams that contain multiple backing indices + // * Multiple indices + // * One or more aliases + // * All data streams and indices in a cluster + // + // Each targeted shard is force-merged separately using the force_merge + // threadpool. + // By default each node only has a single `force_merge` thread which means that + // the shards on that node are force-merged one at a time. + // If you expand the `force_merge` threadpool on a node then it will force merge + // its shards in parallel + // + // Force merge makes the storage for the shard being merged temporarily + // increase, as it may require free space up to triple its size in case + // `max_num_segments parameter` is set to `1`, to rewrite all segments into a + // new one. + // + // **Data streams and time-based indices** + // + // Force-merging is useful for managing a data stream's older backing indices + // and other time-based indices, particularly after a rollover. + // In these cases, each index only receives indexing traffic for a certain + // period of time. + // Once an index receive no more writes, its shards can be force-merged to a + // single segment. + // This can be a good idea because single-segment shards can sometimes use + // simpler and more efficient data structures to perform searches. + // For example: + // + // ``` + // POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 + // ``` // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html Forcemerge indices_forcemerge.NewForcemerge // Get index information. - // Returns information about one or more indices. 
For data streams, the API - // returns information about the + // Get information about one or more indices. For data streams, the API returns + // information about the // stream’s backing indices. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html Get indices_get.NewGet // Get aliases. // Retrieves information for one or more data stream or index aliases. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-alias.html GetAlias indices_get_alias.NewGetAlias // Get data stream lifecycles. - // Retrieves the data stream lifecycle configuration of one or more data - // streams. + // + // Get the data stream lifecycle configuration of one or more data streams. // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-get-lifecycle.html GetDataLifecycle indices_get_data_lifecycle.NewGetDataLifecycle + // Get data stream lifecycle stats. + // Get statistics about the data streams that are managed by a data stream + // lifecycle. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-get-lifecycle-stats.html + GetDataLifecycleStats indices_get_data_lifecycle_stats.NewGetDataLifecycleStats // Get data streams. - // Retrieves information about one or more data streams. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // + // Get information about one or more data streams. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-data-stream.html GetDataStream indices_get_data_stream.NewGetDataStream // Get mapping definitions. // Retrieves mapping definitions for one or more fields. // For data streams, the API retrieves field mappings for the stream’s backing // indices. + // + // This API is useful if you don't need a complete mapping or if an index + // mapping contains a large number of fields. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-field-mapping.html GetFieldMapping indices_get_field_mapping.NewGetFieldMapping // Get index templates. - // Returns information about one or more index templates. + // Get information about one or more index templates. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-template.html GetIndexTemplate indices_get_index_template.NewGetIndexTemplate // Get mapping definitions. - // Retrieves mapping definitions for one or more indices. // For data streams, the API retrieves mappings for the stream’s backing // indices. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html GetMapping indices_get_mapping.NewGetMapping + // Get the migration reindexing status. + // + // Get the status of a migration reindex attempt for a data stream or index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html + GetMigrateReindexStatus indices_get_migrate_reindex_status.NewGetMigrateReindexStatus // Get index settings. - // Returns setting information for one or more indices. For data streams, - // returns setting information for the stream’s backing indices. + // Get setting information for one or more indices. + // For data streams, it returns setting information for the stream's backing + // indices. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html GetSettings indices_get_settings.NewGetSettings // Get index templates. - // Retrieves information about one or more index templates. + // Get information about one or more index templates. + // + // IMPORTANT: This documentation is about legacy index templates, which are + // deprecated and will be replaced by the composable templates introduced in + // Elasticsearch 7.8. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-template-v1.html GetTemplate indices_get_template.NewGetTemplate + // Reindex legacy backing indices. + // + // Reindex all legacy backing indices for a data stream. + // This operation occurs in a persistent task. + // The persistent task ID is returned immediately and the reindexing work is + // completed in that task. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html + MigrateReindex indices_migrate_reindex.NewMigrateReindex // Convert an index alias to a data stream. // Converts an index alias to a data stream. // You must have a matching index template that is data stream enabled. @@ -1555,55 +4284,266 @@ type Indices struct { // the same name. // The indices for the alias become hidden backing indices for the stream. // The write index for the alias becomes the write index for the stream. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-migrate-to-data-stream MigrateToDataStream indices_migrate_to_data_stream.NewMigrateToDataStream // Update data streams. // Performs one or more data stream modification actions in a single atomic // operation. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-modify-data-stream ModifyDataStream indices_modify_data_stream.NewModifyDataStream - // Opens a closed index. + // Open a closed index. // For data streams, the API opens any closed backing indices. + // + // A closed index is blocked for read/write operations and does not allow all + // operations that opened indices allow. + // It is not possible to index documents or to search for documents in a closed + // index. 
+ // This allows closed indices to not have to maintain internal data structures + // for indexing or searching documents, resulting in a smaller overhead on the + // cluster. + // + // When opening or closing an index, the master is responsible for restarting + // the index shards to reflect the new state of the index. + // The shards will then go through the normal recovery process. + // The data of opened or closed indices is automatically replicated by the + // cluster to ensure that enough shard copies are safely kept around at all + // times. + // + // You can open and close multiple indices. + // An error is thrown if the request explicitly refers to a missing index. + // This behavior can be turned off by using the `ignore_unavailable=true` + // parameter. + // + // By default, you must explicitly name the indices you are opening or closing. + // To open or close indices with `_all`, `*`, or other wildcard expressions, + // change the `action.destructive_requires_name` setting to `false`. + // This setting can also be changed with the cluster update settings API. + // + // Closed indices consume a significant amount of disk-space which can cause + // problems in managed environments. + // Closing indices can be turned off with the cluster settings API by setting + // `cluster.indices.close.enable` to `false`. + // + // Because opening or closing an index allocates its shards, the + // `wait_for_active_shards` setting on index creation applies to the `_open` and + // `_close` index actions as well. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html Open indices_open.NewOpen - // Promotes a data stream from a replicated data stream managed by CCR to a - // regular data stream - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html + // Promote a data stream. + // Promote a data stream from a replicated data stream managed by cross-cluster + // replication (CCR) to a regular data stream. 
+ // + // With CCR auto following, a data stream from a remote cluster can be + // replicated to the local cluster. + // These data streams can't be rolled over in the local cluster. + // These replicated data streams roll over only if the upstream data stream + // rolls over. + // In the event that the remote cluster is no longer available, the data stream + // in the local cluster can be promoted to a regular data stream, which allows + // these data streams to be rolled over in the local cluster. + // + // NOTE: When promoting a data stream, ensure the local cluster has a data + // stream enabled index template that matches the data stream. + // If this is missing, the data stream will not be able to roll over until a + // matching index template is created. + // This will affect the lifecycle management of the data stream and interfere + // with the data stream size and retention. + // https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-promote-data-stream PromoteDataStream indices_promote_data_stream.NewPromoteDataStream // Create or update an alias. // Adds a data stream or index to an alias. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html + // https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-alias PutAlias indices_put_alias.NewPutAlias // Update data stream lifecycles. // Update the data stream lifecycle of the specified data streams. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-put-lifecycle.html + // https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-data-lifecycle PutDataLifecycle indices_put_data_lifecycle.NewPutDataLifecycle // Create or update an index template. // Index templates define settings, mappings, and aliases that can be applied // automatically to new indices. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-template.html
+ //
+ // Elasticsearch applies templates to new indices based on a wildcard pattern
+ // that matches the index name.
+ // Index templates are applied during data stream or index creation.
+ // For data streams, these settings and mappings are applied when the stream's
+ // backing indices are created.
+ // Settings and mappings specified in a create index API request override any
+ // settings or mappings specified in an index template.
+ // Changes to index templates do not affect existing indices, including the
+ // existing backing indices of a data stream.
+ //
+ // You can use C-style `/* *\/` block comments in index templates.
+ // You can include comments anywhere in the request body, except before the
+ // opening curly bracket.
+ //
+ // **Multiple matching templates**
+ //
+ // If multiple index templates match the name of a new index or data stream, the
+ // template with the highest priority is used.
+ //
+ // Multiple templates with overlapping index patterns at the same priority are
+ // not allowed and an error will be thrown when attempting to create a template
+ // matching an existing index template at identical priorities.
+ //
+ // **Composing aliases, mappings, and settings**
+ //
+ // When multiple component templates are specified in the `composed_of` field
+ // for an index template, they are merged in the order specified, meaning that
+ // later component templates override earlier component templates.
+ // Any mappings, settings, or aliases from the parent index template are merged
+ // in next.
+ // Finally, any configuration on the index request itself is merged.
+ // Mapping definitions are merged recursively, which means that later mapping
+ // components can introduce new field mappings and update the mapping
+ // configuration.
+ // If a field mapping is already contained in an earlier component, its + // definition will be completely overwritten by the later one. + // This recursive merging strategy applies not only to field mappings, but also + // root options like `dynamic_templates` and `meta`. + // If an earlier component contains a `dynamic_templates` block, then by default + // new `dynamic_templates` entries are appended onto the end. + // If an entry already exists with the same key, then it is overwritten by the + // new definition. + // https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-index-template PutIndexTemplate indices_put_index_template.NewPutIndexTemplate // Update field mappings. - // Adds new fields to an existing data stream or index. - // You can also use this API to change the search settings of existing fields. + // Add new fields to an existing data stream or index. + // You can also use this API to change the search settings of existing fields + // and add new properties to existing object fields. // For data streams, these changes are applied to all backing indices by // default. + // + // **Add multi-fields to an existing field** + // + // Multi-fields let you index the same field in different ways. + // You can use this API to update the fields mapping parameter and enable + // multi-fields for an existing field. + // WARNING: If an index (or data stream) contains documents when you add a + // multi-field, those documents will not have values for the new multi-field. + // You can populate the new multi-field with the update by query API. + // + // **Change supported mapping parameters for an existing field** + // + // The documentation for each mapping parameter indicates whether you can update + // it for an existing field using this API. + // For example, you can use the update mapping API to update the `ignore_above` + // parameter. 
+ // + // **Change the mapping of an existing field** + // + // Except for supported mapping parameters, you can't change the mapping or + // field type of an existing field. + // Changing an existing field could invalidate data that's already indexed. + // + // If you need to change the mapping of a field in a data stream's backing + // indices, refer to documentation about modifying data streams. + // If you need to change the mapping of a field in other indices, create a new + // index with the correct mapping and reindex your data into that index. + // + // **Rename a field** + // + // Renaming a field would invalidate data already indexed under the old field + // name. + // Instead, add an alias field to create an alternate field name. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html PutMapping indices_put_mapping.NewPutMapping // Update index settings. - // Changes dynamic index settings in real time. For data streams, index setting - // changes are applied to all backing indices by default. + // Changes dynamic index settings in real time. + // For data streams, index setting changes are applied to all backing indices by + // default. + // + // To revert a setting to the default value, use a null value. + // The list of per-index settings that can be updated dynamically on live + // indices can be found in index module documentation. + // To preserve existing settings from being updated, set the `preserve_existing` + // parameter to `true`. + // + // NOTE: You can only define new analyzers on closed indices. + // To add an analyzer, you must close the index, define the analyzer, and reopen + // the index. + // You cannot close the write index of a data stream. + // To update the analyzer for a data stream's write index and future backing + // indices, update the analyzer in the index template used by the stream. 
+ // Then roll over the data stream to apply the new analyzer to the stream's + // write index and future backing indices. + // This affects searches and any new data added to the stream after the + // rollover. + // However, it does not affect the data stream's backing indices or their + // existing data. + // To change the analyzer for existing backing indices, you must create a new + // data stream and reindex your data into it. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html PutSettings indices_put_settings.NewPutSettings // Create or update an index template. // Index templates define settings, mappings, and aliases that can be applied // automatically to new indices. + // Elasticsearch applies templates to new indices based on an index pattern that + // matches the index name. + // + // IMPORTANT: This documentation is about legacy index templates, which are + // deprecated and will be replaced by the composable templates introduced in + // Elasticsearch 7.8. + // + // Composable templates always take precedence over legacy templates. + // If no composable template matches a new index, matching legacy templates are + // applied according to their order. + // + // Index templates are only applied during index creation. + // Changes to index templates do not affect existing indices. + // Settings and mappings specified in create index API requests override any + // settings or mappings specified in an index template. + // + // You can use C-style `/* *\/` block comments in index templates. + // You can include comments anywhere in the request body, except before the + // opening curly bracket. + // + // **Indices matching multiple templates** + // + // Multiple index templates can potentially match an index, in this case, both + // the settings and mappings are merged into the final configuration of the + // index. 
+ // The order of the merging can be controlled using the order parameter, with + // lower order being applied first, and higher orders overriding them. + // NOTE: Multiple matching templates with the same order value will result in a + // non-deterministic merging order. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates-v1.html PutTemplate indices_put_template.NewPutTemplate - // Returns information about ongoing and completed shard recoveries for one or - // more indices. - // For data streams, the API returns information for the stream’s backing + // Get index recovery information. + // Get information about ongoing and completed shard recoveries for one or more // indices. + // For data streams, the API returns information for the stream's backing + // indices. + // + // All recoveries, whether ongoing or complete, are kept in the cluster state + // and may be reported on at any time. + // + // Shard recovery is the process of initializing a shard copy, such as restoring + // a primary shard from a snapshot or creating a replica shard from a primary + // shard. + // When a shard recovery completes, the recovered shard is available for search + // and indexing. + // + // Recovery automatically occurs during the following processes: + // + // * When creating an index for the first time. + // * When a node rejoins the cluster and starts up any missing primary shard + // copies using the data that it holds in its data path. + // * Creation of new replica shard copies from the primary. + // * Relocation of a shard copy to a different node in the same cluster. + // * A snapshot restore operation. + // * A clone, shrink, or split operation. + // + // You can determine the cause of a shard recovery using the recovery or cat + // recovery APIs. + // + // The index recovery API reports information about completed recoveries only + // for shard copies that currently exist in the cluster. 
+ // It only reports the last recovery for each shard copy and does not report + // historical information about earlier recoveries, nor does it report + // information about the recoveries of shard copies that no longer exist. + // This means that if a shard copy completes a recovery and then Elasticsearch + // relocates it onto a different node then the information about the original + // recovery will not be shown in the recovery API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-recovery.html Recovery indices_recovery.NewRecovery // Refresh an index. @@ -1611,63 +4551,365 @@ type Indices struct { // for search. // For data streams, the API runs the refresh operation on the stream’s backing // indices. + // + // By default, Elasticsearch periodically refreshes indices every second, but + // only on indices that have received one search request or more in the last 30 + // seconds. + // You can change this default interval with the `index.refresh_interval` + // setting. + // + // Refresh requests are synchronous and do not return a response until the + // refresh operation completes. + // + // Refreshes are resource-intensive. + // To ensure good cluster performance, it's recommended to wait for + // Elasticsearch's periodic refresh rather than performing an explicit refresh + // when possible. + // + // If your application workflow indexes documents and then runs a search to + // retrieve the indexed document, it's recommended to use the index API's + // `refresh=wait_for` query parameter option. + // This option ensures the indexing operation waits for a periodic refresh + // before running the search. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html Refresh indices_refresh.NewRefresh - // Reloads an index's search analyzers and their resources. + // Reload search analyzers. + // Reload an index's search analyzers and their resources. 
+ // For data streams, the API reloads search analyzers and resources for the + // stream's backing indices. + // + // IMPORTANT: After reloading the search analyzers you should clear the request + // cache to make sure it doesn't contain responses derived from the previous + // versions of the analyzer. + // + // You can use the reload search analyzers API to pick up changes to synonym + // files used in the `synonym_graph` or `synonym` token filter of a search + // analyzer. + // To be eligible, the token filter must have an `updateable` flag of `true` and + // only be used in search analyzers. + // + // NOTE: This API does not perform a reload for each shard of an index. + // Instead, it performs a reload for each node containing index shards. + // As a result, the total shard count returned by the API can differ from the + // number of index shards. + // Because reloading affects every node with an index shard, it is important to + // update the synonym file on every data node in the cluster--including nodes + // that don't contain a shard replica--before using this API. + // This ensures the synonym file is updated everywhere in the cluster in case + // shards are relocated in the future. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-reload-analyzers.html ReloadSearchAnalyzers indices_reload_search_analyzers.NewReloadSearchAnalyzers - // Resolves the specified index expressions to return information about each - // cluster, including - // the local cluster, if included. - // Multiple patterns and remote clusters are supported. + // Resolve the cluster. + // + // Resolve the specified index expressions to return information about each + // cluster, including the local "querying" cluster, if included. + // If no index expression is provided, the API will return information about all + // the remote clusters that are configured on the querying cluster. 
+ //
+ // This endpoint is useful before doing a cross-cluster search in order to
+ // determine which remote clusters should be included in a search.
+ //
+ // You use the same index expression with this endpoint as you would for
+ // cross-cluster search.
+ // Index and cluster exclusions are also supported with this endpoint.
+ //
+ // For each cluster in the index expression, information is returned about:
+ //
+ // * Whether the querying ("local") cluster is currently connected to each
+ // remote cluster specified in the index expression. Note that this endpoint
+ // actively attempts to contact the remote clusters, unlike the `remote/info`
+ // endpoint.
+ // * Whether each remote cluster is configured with `skip_unavailable` as `true`
+ // or `false`.
+ // * Whether there are any indices, aliases, or data streams on that cluster
+ // that match the index expression.
+ // * Whether the search is likely to have errors returned when you do the
+ // cross-cluster search (including any authorization errors if you do not have
+ // permission to query the index).
+ // * Cluster version information, including the Elasticsearch server version.
+ //
+ // For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns
+ // information about the local cluster and all remotely configured clusters that
+ // start with the alias `cluster*`.
+ // Each cluster returns information about whether it has any indices, aliases or
+ // data streams that match `my-index-*`.
+ //
+ // ## Note on backwards compatibility
+ // The ability to query without an index expression was added in version 8.18,
+ // so when
+ // querying remote clusters older than that, the local cluster will send the
+ // index
+ // expression `dummy*` to those remote clusters. Thus, if any errors occur, you
+ // may see a reference
+ // to that index expression even though you didn't request it.
If it causes a + // problem, you can + // instead include an index expression like `*:*` to bypass the issue. + // + // ## Advantages of using this endpoint before a cross-cluster search + // + // You may want to exclude a cluster or index from a search when: + // + // * A remote cluster is not currently connected and is configured with + // `skip_unavailable=false`. Running a cross-cluster search under those + // conditions will cause the entire search to fail. + // * A cluster has no matching indices, aliases or data streams for the index + // expression (or your user does not have permissions to search them). For + // example, suppose your index expression is `logs*,remote1:logs*` and the + // remote1 cluster has no indices, aliases or data streams that match `logs*`. + // In that case, that cluster will return no results from that cluster if you + // include it in a cross-cluster search. + // * The index expression (combined with any query parameters you specify) will + // likely cause an exception to be thrown when you do the search. In these + // cases, the "error" field in the `_resolve/cluster` response will be present. + // (This is also where security/permission errors will be shown.) + // * A remote cluster is an older version that does not support the feature you + // want to use in your search. + // + // ## Test availability of remote clusters + // + // The `remote/info` endpoint is commonly used to test whether the "local" + // cluster (the cluster being queried) is connected to its remote clusters, but + // it does not necessarily reflect whether the remote cluster is available or + // not. + // The remote cluster may be available, while the local cluster is not currently + // connected to it. + // + // You can use the `_resolve/cluster` API to attempt to reconnect to remote + // clusters. + // For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. 
+ // The `connected` field in the response will indicate whether it was
+ // successful.
+ // If a connection was (re-)established, this will also cause the `remote/info`
+ // endpoint to now indicate a connected status.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-resolve-cluster-api.html
 ResolveCluster indices_resolve_cluster.NewResolveCluster
- // Resolves the specified name(s) and/or index patterns for indices, aliases,
- // and data streams.
+ // Resolve indices.
+ // Resolve the names and/or index patterns for indices, aliases, and data
+ // streams.
 // Multiple patterns and remote clusters are supported.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-resolve-index-api.html
 ResolveIndex indices_resolve_index.NewResolveIndex
 // Roll over to a new index.
- // Creates a new index for a data stream or index alias.
+ // TIP: It is recommended to use the index lifecycle rollover action to automate
+ // rollovers.
+ //
+ // The rollover API creates a new index for a data stream or index alias.
+ // The API behavior depends on the rollover target.
+ //
+ // **Roll over a data stream**
+ //
+ // If you roll over a data stream, the API creates a new write index for the
+ // stream.
+ // The stream's previous write index becomes a regular backing index.
+ // A rollover also increments the data stream's generation.
+ //
+ // **Roll over an index alias with a write index**
+ //
+ // TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a
+ // write index to manage time series data.
+ // Data streams replace this functionality, require less maintenance, and
+ // automatically integrate with data tiers.
+ //
+ // If an index alias points to multiple indices, one of the indices must be a
+ // write index.
+ // The rollover API creates a new write index for the alias with
+ // `is_write_index` set to `true`.
+ // The API also sets `is_write_index` to `false` for the previous write index.
+ //
+ // **Roll over an index alias with one index**
+ //
+ // If you roll over an index alias that points to only one index, the API
+ // creates a new index for the alias and removes the original index from the
+ // alias.
+ //
+ // NOTE: A rollover creates a new index and is subject to the
+ // `wait_for_active_shards` setting.
+ //
+ // **Increment index names for an alias**
+ //
+ // When you roll over an index alias, you can specify a name for the new index.
+ // If you don't specify a name and the current index ends with `-` and a number,
+ // such as `my-index-000001` or `my-index-3`, the new index name increments that
+ // number.
+ // For example, if you roll over an alias with a current index of
+ // `my-index-000001`, the rollover creates a new index named `my-index-000002`.
+ // This number is always six characters and zero-padded, regardless of the
+ // previous index's name.
+ //
+ // If you use an index alias for time series data, you can use date math in the
+ // index name to track the rollover date.
+ // For example, you can create an alias that points to an index named
+ // `<my-index-{now/d}-000001>`.
+ // If you create the index on May 6, 2099, the index's name is
+ // `my-index-2099.05.06-000001`.
+ // If you roll over the alias on May 7, 2099, the new index's name is
+ // `my-index-2099.05.07-000002`.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html
 Rollover indices_rollover.NewRollover
- // Returns low-level information about the Lucene segments in index shards.
- // For data streams, the API returns information about the stream’s backing
+ // Get index segments.
+ // Get low-level information about the Lucene segments in index shards.
+ // For data streams, the API returns information about the stream's backing
 // indices.
// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-segments.html Segments indices_segments.NewSegments - // Retrieves store information about replica shards in one or more indices. - // For data streams, the API retrieves store information for the stream’s + // Get index shard stores. + // Get store information about replica shards in one or more indices. + // For data streams, the API retrieves store information for the stream's // backing indices. + // + // The index shard stores API returns the following information: + // + // * The node on which each replica shard exists. + // * The allocation ID for each replica shard. + // * A unique ID for each replica shard. + // * Any errors encountered while opening the shard index or from an earlier + // failure. + // + // By default, the API returns store information only for primary shards that + // are unassigned or have one or more unassigned replica shards. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html ShardStores indices_shard_stores.NewShardStores - // Shrinks an existing index into a new index with fewer primary shards. + // Shrink an index. + // Shrink an index into a new index with fewer primary shards. + // + // Before you can shrink an index: + // + // * The index must be read-only. + // * A copy of every shard in the index must reside on the same node. + // * The index must have a green health status. + // + // To make shard allocation easier, we recommend you also remove the index's + // replica shards. + // You can later re-add replica shards as part of the shrink operation. + // + // The requested number of primary shards in the target index must be a factor + // of the number of shards in the source index. + // For example an index with 8 primary shards can be shrunk into 4, 2 or 1 + // primary shards or an index with 15 primary shards can be shrunk into 5, 3 or + // 1. 
+ // If the number of shards in the index is a prime number it can only be shrunk + // into a single primary shard + // Before shrinking, a (primary or replica) copy of every shard in the index + // must be present on the same node. + // + // The current write index on a data stream cannot be shrunk. In order to shrink + // the current write index, the data stream must first be rolled over so that a + // new write index is created and then the previous write index can be shrunk. + // + // A shrink operation: + // + // * Creates a new target index with the same definition as the source index, + // but with a smaller number of primary shards. + // * Hard-links segments from the source index into the target index. If the + // file system does not support hard-linking, then all segments are copied into + // the new index, which is a much more time consuming process. Also if using + // multiple data paths, shards on different data paths require a full copy of + // segment files if they are not on the same disk since hardlinks do not work + // across disks. + // * Recovers the target index as though it were a closed index which had just + // been re-opened. Recovers shards to the + // `.routing.allocation.initial_recovery._id` index setting. + // + // IMPORTANT: Indices can only be shrunk if they satisfy the following + // requirements: + // + // * The target index must not exist. + // * The source index must have more primary shards than the target index. + // * The number of primary shards in the target index must be a factor of the + // number of primary shards in the source index. The source index must have more + // primary shards than the target index. + // * The index must not contain more than 2,147,483,519 documents in total + // across all shards that will be shrunk into a single shard on the target index + // as this is the maximum number of docs that can fit into a single shard. 
+ // * The node handling the shrink process must have sufficient free disk space
+ // to accommodate a second copy of the existing index.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html
 Shrink indices_shrink.NewShrink
 // Simulate an index.
- // Returns the index configuration that would be applied to the specified index
- // from an existing index template.
+ // Get the index configuration that would be applied to the specified index from
+ // an existing index template.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-simulate-index.html
 SimulateIndexTemplate indices_simulate_index_template.NewSimulateIndexTemplate
 // Simulate an index template.
- // Returns the index configuration that would be applied by a particular index
+ // Get the index configuration that would be applied by a particular index
 // template.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-simulate-template.html
 SimulateTemplate indices_simulate_template.NewSimulateTemplate
- // Splits an existing index into a new index with more primary shards.
+ // Split an index.
+ // Split an index into a new index with more primary shards.
+ // Before you can split an index:
+ //
+ // * The index must be read-only.
+ // * The cluster health status must be green.
+ //
+ // You can make an index read-only with the following request using the add
+ // index block API:
+ //
+ // ```
+ // PUT /my_source_index/_block/write
+ // ```
+ //
+ // The current write index on a data stream cannot be split.
+ // In order to split the current write index, the data stream must first be
+ // rolled over so that a new write index is created and then the previous write
+ // index can be split.
+ //
+ // The number of times the index can be split (and the number of shards that
+ // each original shard can be split into) is determined by the
+ // `index.number_of_routing_shards` setting.
+ // The number of routing shards specifies the hashing space that is used + // internally to distribute documents across shards with consistent hashing. + // For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x + // 2 x 3) could be split by a factor of 2 or 3. + // + // A split operation: + // + // * Creates a new target index with the same definition as the source index, + // but with a larger number of primary shards. + // * Hard-links segments from the source index into the target index. If the + // file system doesn't support hard-linking, all segments are copied into the + // new index, which is a much more time consuming process. + // * Hashes all documents again, after low level files are created, to delete + // documents that belong to a different shard. + // * Recovers the target index as though it were a closed index which had just + // been re-opened. + // + // IMPORTANT: Indices can only be split if they satisfy the following + // requirements: + // + // * The target index must not exist. + // * The source index must have fewer primary shards than the target index. + // * The number of primary shards in the target index must be a multiple of the + // number of primary shards in the source index. + // * The node handling the split process must have sufficient free disk space to + // accommodate a second copy of the existing index. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-split-index.html Split indices_split.NewSplit - // Returns statistics for one or more indices. - // For data streams, the API retrieves statistics for the stream’s backing + // Get index statistics. + // For data streams, the API retrieves statistics for the stream's backing // indices. + // + // By default, the returned statistics are index-level with `primaries` and + // `total` aggregations. + // `primaries` are the values for only the primary shards. + // `total` are the accumulated values for both primary and replica shards. 
+ // + // To get shard-level statistics, set the `level` parameter to `shards`. + // + // NOTE: When moving to another node, the shard-level statistics for a shard are + // cleared. + // Although the shard is no longer part of the node, that node retains any + // node-level statistics to which the shard contributed. // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html Stats indices_stats.NewStats - // Unfreezes an index. + // Unfreeze an index. + // When a frozen index is unfrozen, the index goes through the normal recovery + // process and becomes writeable again. // https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html Unfreeze indices_unfreeze.NewUnfreeze // Create or update an alias. // Adds a data stream or index to an alias. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html + // https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-update-aliases UpdateAliases indices_update_aliases.NewUpdateAliases // Validate a query. // Validates a query without running it. @@ -1676,110 +4918,314 @@ type Indices struct { } type Inference struct { + // Perform chat completion inference + // https://www.elastic.co/guide/en/elasticsearch/reference/current/chat-completion-inference-api.html + ChatCompletionUnified inference_chat_completion_unified.NewChatCompletionUnified + // Perform completion inference on the service + // https://www.elastic.co/guide/en/elasticsearch/reference/current/post-inference-api.html + Completion inference_completion.NewCompletion // Delete an inference endpoint // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-inference-api.html Delete inference_delete.NewDelete // Get an inference endpoint // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-inference-api.html Get inference_get.NewGet - // Perform inference on the service + // Create an inference endpoint. 
+ // When you create an inference endpoint, the associated machine learning model + // is automatically deployed if it is not already running. + // After creating the endpoint, wait for the model deployment to complete before + // using it. + // To verify the deployment status, use the get trained model statistics API. + // Look for `"state": "fully_allocated"` in the response and ensure that the + // `"allocation_count"` matches the `"target_allocation_count"`. + // Avoid creating multiple endpoints for the same model unless required, as each + // endpoint consumes significant resources. + // + // IMPORTANT: The inference APIs enable you to use certain services, such as + // built-in machine learning models (ELSER, E5), models uploaded through Eland, + // Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, + // Anthropic, Watsonx.ai, or Hugging Face. + // For built-in models and models uploaded through Eland, the inference APIs + // offer an alternative way to use and manage trained models. + // However, if you do not plan to use the inference APIs to use these models or + // if you want to use non-NLP models, use the machine learning trained model + // APIs. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-inference-api.html + Put inference_put.NewPut + // Create an OpenAI inference endpoint. + // + // Create an inference endpoint to perform an inference task with the `openai` + // service. + // + // When you create an inference endpoint, the associated machine learning model + // is automatically deployed if it is not already running. + // After creating the endpoint, wait for the model deployment to complete before + // using it. + // To verify the deployment status, use the get trained model statistics API. + // Look for `"state": "fully_allocated"` in the response and ensure that the + // `"allocation_count"` matches the `"target_allocation_count"`. 
+ // Avoid creating multiple endpoints for the same model unless required, as each + // endpoint consumes significant resources. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-openai.html + PutOpenai inference_put_openai.NewPutOpenai + // Create a Watsonx inference endpoint. + // + // Create an inference endpoint to perform an inference task with the + // `watsonxai` service. + // You need an IBM Cloud Databases for Elasticsearch deployment to use the + // `watsonxai` inference service. + // You can provision one through the IBM catalog, the Cloud Databases CLI + // plug-in, the Cloud Databases API, or Terraform. + // + // When you create an inference endpoint, the associated machine learning model + // is automatically deployed if it is not already running. + // After creating the endpoint, wait for the model deployment to complete before + // using it. + // To verify the deployment status, use the get trained model statistics API. + // Look for `"state": "fully_allocated"` in the response and ensure that the + // `"allocation_count"` matches the `"target_allocation_count"`. + // Avoid creating multiple endpoints for the same model unless required, as each + // endpoint consumes significant resources. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-watsonx-ai.html
+ PutWatsonx inference_put_watsonx.NewPutWatsonx
+ // Perform reranking inference on the service
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/post-inference-api.html
- Inference inference_inference.NewInference
- // Create an inference endpoint
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-inference-api.html
- Put inference_put.NewPut
+ Rerank inference_rerank.NewRerank
+ // Perform sparse embedding inference on the service
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/post-inference-api.html
+ SparseEmbedding inference_sparse_embedding.NewSparseEmbedding
+ // Perform streaming inference.
+ // Get real-time responses for completion tasks by delivering answers
+ // incrementally, reducing response times during computation.
+ // This API works only with the completion task type.
+ //
+ // IMPORTANT: The inference APIs enable you to use certain services, such as
+ // built-in machine learning models (ELSER, E5), models uploaded through Eland,
+ // Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic,
+ // Watsonx.ai, or Hugging Face. For built-in models and models uploaded through
+ // Eland, the inference APIs offer an alternative way to use and manage trained
+ // models. However, if you do not plan to use the inference APIs to use these
+ // models or if you want to use non-NLP models, use the machine learning trained
+ // model APIs.
+ //
+ // This API requires the `monitor_inference` cluster privilege (the built-in
+ // `inference_admin` and `inference_user` roles grant this privilege). You must
+ // use a client that supports streaming.
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/stream-inference-api.html + StreamCompletion inference_stream_completion.NewStreamCompletion + // Perform text embedding inference on the service + // https://www.elastic.co/guide/en/elasticsearch/reference/current/post-inference-api.html + TextEmbedding inference_text_embedding.NewTextEmbedding + // Update an inference endpoint. + // + // Modify `task_settings`, secrets (within `service_settings`), or + // `num_allocations` for an inference endpoint, depending on the specific + // endpoint service and `task_type`. + // + // IMPORTANT: The inference APIs enable you to use certain services, such as + // built-in machine learning models (ELSER, E5), models uploaded through Eland, + // Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, + // Watsonx.ai, or Hugging Face. + // For built-in models and models uploaded through Eland, the inference APIs + // offer an alternative way to use and manage trained models. + // However, if you do not plan to use the inference APIs to use these models or + // if you want to use non-NLP models, use the machine learning trained model + // APIs. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-inference-api.html + Update inference_update.NewUpdate } type Ingest struct { - // Deletes one or more existing ingest pipeline. + // Delete GeoIP database configurations. + // + // Delete one or more IP geolocation database configurations. + // https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-delete-geoip-database + DeleteGeoipDatabase ingest_delete_geoip_database.NewDeleteGeoipDatabase + // Delete IP geolocation database configurations. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-ip-location-database-api.html + DeleteIpLocationDatabase ingest_delete_ip_location_database.NewDeleteIpLocationDatabase + // Delete pipelines. + // Delete one or more ingest pipelines. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-pipeline-api.html DeletePipeline ingest_delete_pipeline.NewDeletePipeline - // Gets download statistics for GeoIP2 databases used with the geoip processor. + // Get GeoIP statistics. + // Get download statistics for GeoIP2 databases that are used with the GeoIP + // processor. // https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html GeoIpStats ingest_geo_ip_stats.NewGeoIpStats - // Returns information about one or more ingest pipelines. + // Get GeoIP database configurations. + // + // Get information about one or more IP geolocation database configurations. + // https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-get-geoip-database + GetGeoipDatabase ingest_get_geoip_database.NewGetGeoipDatabase + // Get IP geolocation database configurations. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ip-location-database-api.html + GetIpLocationDatabase ingest_get_ip_location_database.NewGetIpLocationDatabase + // Get pipelines. + // + // Get information about one or more ingest pipelines. // This API returns a local reference of the pipeline. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-pipeline-api.html GetPipeline ingest_get_pipeline.NewGetPipeline - // Extracts structured fields out of a single text field within a document. - // You choose which field to extract matched fields from, as well as the grok - // pattern you expect will match. + // Run a grok processor. + // Extract structured fields out of a single text field within a document. + // You must choose which field to extract matched fields from, as well as the + // grok pattern you expect will match. // A grok pattern is like a regular expression that supports aliased expressions // that can be reused. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/grok-processor.html ProcessorGrok ingest_processor_grok.NewProcessorGrok - // Creates or updates an ingest pipeline. + // Create or update a GeoIP database configuration. + // + // Refer to the create or update IP geolocation database configuration API. + // https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-put-geoip-database + PutGeoipDatabase ingest_put_geoip_database.NewPutGeoipDatabase + // Create or update an IP geolocation database configuration. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-ip-location-database-api.html + PutIpLocationDatabase ingest_put_ip_location_database.NewPutIpLocationDatabase + // Create or update a pipeline. // Changes made using this API take effect immediately. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html PutPipeline ingest_put_pipeline.NewPutPipeline - // Executes an ingest pipeline against a set of provided documents. + // Simulate a pipeline. + // + // Run an ingest pipeline against a set of provided documents. + // You can either specify an existing pipeline to use with the provided + // documents or supply a pipeline definition in the body of the request. // https://www.elastic.co/guide/en/elasticsearch/reference/current/simulate-pipeline-api.html Simulate ingest_simulate.NewSimulate } type License struct { - // Deletes licensing information for the cluster + // Delete the license. + // + // When the license expires, your subscription level reverts to Basic. + // + // If the operator privileges feature is enabled, only operator users can use + // this API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-license.html Delete license_delete.NewDelete // Get license information. - // Returns information about your Elastic license, including its type, its - // status, when it was issued, and when it expires. 
- // For more information about the different types of licenses, refer to [Elastic - // Stack subscriptions](https://www.elastic.co/subscriptions). + // + // Get information about your Elastic license including its type, its status, + // when it was issued, and when it expires. + // + // >info + // > If the master node is generating a new cluster state, the get license API + // may return a `404 Not Found` response. + // > If you receive an unexpected 404 response after cluster startup, wait a + // short period and retry the request. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-license.html Get license_get.NewGet - // Retrieves information about the status of the basic license. + // Get the basic license status. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html GetBasicStatus license_get_basic_status.NewGetBasicStatus - // Retrieves information about the status of the trial license. + // Get the trial status. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trial-status.html GetTrialStatus license_get_trial_status.NewGetTrialStatus - // Updates the license for the cluster. + // Update the license. + // + // You can update your license at runtime without shutting down your nodes. + // License updates take effect immediately. + // If the license you are installing does not support all of the features that + // were available with your previous license, however, you are notified in the + // response. + // You must then re-submit the API request with the acknowledge parameter set to + // true. + // + // NOTE: If Elasticsearch security features are enabled and you are installing a + // gold or higher license, you must enable TLS on the transport networking layer + // before you install the license. + // If the operator privileges feature is enabled, only operator users can use + // this API. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-license.html Post license_post.NewPost - // The start basic API enables you to initiate an indefinite basic license, - // which gives access to all the basic features. If the basic license does not - // support all of the features that are available with your current license, - // however, you are notified in the response. You must then re-submit the API - // request with the acknowledge parameter set to true. - // To check the status of your basic license, use the following API: [Get basic - // status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). + // Start a basic license. + // + // Start an indefinite basic license, which gives access to all the basic + // features. + // + // NOTE: In order to start a basic license, you must not currently have a basic + // license. + // + // If the basic license does not support all of the features that are available + // with your current license, however, you are notified in the response. + // You must then re-submit the API request with the `acknowledge` parameter set + // to `true`. + // + // To check the status of your basic license, use the get basic license API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-basic.html PostStartBasic license_post_start_basic.NewPostStartBasic - // The start trial API enables you to start a 30-day trial, which gives access - // to all subscription features. + // Start a trial. + // Start a 30-day trial, which gives access to all subscription features. + // + // NOTE: You are allowed to start a trial only if your cluster has not already + // activated a trial for the current major product version. + // For example, if you have already activated a trial for v8.0, you cannot start + // a new trial until v9.0. You can, however, request an extended trial at + // https://www.elastic.co/trialextension. 
+ // + // To check the status of your trial, use the get trial status API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trial.html PostStartTrial license_post_start_trial.NewPostStartTrial } type Logstash struct { - // Deletes a pipeline used for Logstash Central Management. + // Delete a Logstash pipeline. + // Delete a pipeline that is used for Logstash Central Management. + // If the request succeeds, you receive an empty response with an appropriate + // status code. // https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-delete-pipeline.html DeletePipeline logstash_delete_pipeline.NewDeletePipeline - // Retrieves pipelines used for Logstash Central Management. + // Get Logstash pipelines. + // Get pipelines that are used for Logstash Central Management. // https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-get-pipeline.html GetPipeline logstash_get_pipeline.NewGetPipeline - // Creates or updates a pipeline used for Logstash Central Management. + // Create or update a Logstash pipeline. + // + // Create a pipeline that is used for Logstash Central Management. + // If the specified pipeline exists, it is replaced. // https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-put-pipeline.html PutPipeline logstash_put_pipeline.NewPutPipeline } type Migration struct { - // Retrieves information about different cluster, node, and index level settings - // that use deprecated features that will be removed or changed in the next - // major version. + // Get deprecation information. + // Get information about different cluster, node, and index level settings that + // use deprecated features that will be removed or changed in the next major + // version. + // + // TIP: This APIs is designed for indirect use by the Upgrade Assistant. + // You are strongly recommended to use the Upgrade Assistant. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-deprecation.html Deprecations migration_deprecations.NewDeprecations - // Find out whether system features need to be upgraded or not - // https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html + // Get feature migration information. + // Version upgrades sometimes require changes to how features store + // configuration information and data in system indices. + // Check which features need to be migrated and the status of any migrations + // that are in progress. + // + // TIP: This API is designed for indirect use by the Upgrade Assistant. + // You are strongly recommended to use the Upgrade Assistant. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/feature-migration-api.html GetFeatureUpgradeStatus migration_get_feature_upgrade_status.NewGetFeatureUpgradeStatus - // Begin upgrades for system features - // https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html + // Start the feature migration. + // Version upgrades sometimes require changes to how features store + // configuration information and data in system indices. + // This API starts the automatic migration process. + // + // Some functionality might be temporarily unavailable during the migration + // process. + // + // TIP: The API is designed for indirect use by the Upgrade Assistant. We + // strongly recommend you use the Upgrade Assistant. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/feature-migration-api.html PostFeatureUpgrade migration_post_feature_upgrade.NewPostFeatureUpgrade } type Ml struct { // Clear trained model deployment cache. + // // Cache will be cleared on all nodes where the trained model is assigned. // A trained model deployment may have an inference cache enabled. 
// As requests are handled by each allocated node, their responses may be cached @@ -1788,6 +5234,7 @@ type Ml struct { // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-trained-model-deployment-cache.html ClearTrainedModelDeploymentCache ml_clear_trained_model_deployment_cache.NewClearTrainedModelDeploymentCache // Close anomaly detection jobs. + // // A job can be opened and closed multiple times throughout its lifecycle. A // closed job cannot receive data or perform analysis operations, but you can // still explore and navigate results. @@ -1807,7 +5254,8 @@ type Ml struct { // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html CloseJob ml_close_job.NewCloseJob // Delete a calendar. - // Removes all scheduled events from a calendar, then deletes it. + // + // Remove all scheduled events from a calendar, then delete it. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar.html DeleteCalendar ml_delete_calendar.NewDeleteCalendar // Delete events from a calendar. @@ -1823,22 +5271,25 @@ type Ml struct { // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html DeleteDatafeed ml_delete_datafeed.NewDeleteDatafeed // Delete expired ML data. - // Deletes all job results, model snapshots and forecast data that have exceeded + // + // Delete all job results, model snapshots and forecast data that have exceeded // their retention days period. Machine learning state documents that are not // associated with any job are also deleted. // You can limit the request to a single or set of anomaly detection jobs by // using a job identifier, a group name, a comma-separated list of jobs, or a // wildcard expression. You can delete expired data for all anomaly detection - // jobs by using _all, by specifying * as the , or by omitting the - // . + // jobs by using `_all`, by specifying `*` as the ``, or by omitting the + // ``. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-expired-data.html DeleteExpiredData ml_delete_expired_data.NewDeleteExpiredData // Delete a filter. + // // If an anomaly detection job references the filter, you cannot delete the // filter. You must update or delete the job before you can delete the filter. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-filter.html DeleteFilter ml_delete_filter.NewDeleteFilter // Delete forecasts from a job. + // // By default, forecasts are retained for 14 days. You can specify a // different retention period with the `expires_in` parameter in the forecast // jobs API. The delete forecast API enables you to delete one or more @@ -1846,6 +5297,7 @@ type Ml struct { // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html DeleteForecast ml_delete_forecast.NewDeleteForecast // Delete an anomaly detection job. + // // All job configuration, model state and results are deleted. // It is not currently possible to delete multiple jobs using wildcards or a // comma separated list. If you delete a job that has a datafeed, the request @@ -1855,29 +5307,35 @@ type Ml struct { // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html DeleteJob ml_delete_job.NewDeleteJob // Delete a model snapshot. + // // You cannot delete the active model snapshot. To delete that snapshot, first // revert to a different one. To identify the active model snapshot, refer to // the `model_snapshot_id` in the results from the get jobs API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html DeleteModelSnapshot ml_delete_model_snapshot.NewDeleteModelSnapshot // Delete an unreferenced trained model. + // // The request deletes a trained inference model that is not referenced by an // ingest pipeline. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models.html DeleteTrainedModel ml_delete_trained_model.NewDeleteTrainedModel // Delete a trained model alias. + // // This API deletes an existing model alias that refers to a trained model. If // the model alias is missing or refers to a model other than the one identified // by the `model_id`, this API returns an error. // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models-aliases.html DeleteTrainedModelAlias ml_delete_trained_model_alias.NewDeleteTrainedModelAlias // Estimate job model memory usage. - // Makes an estimation of the memory usage for an anomaly detection job model. - // It is based on analysis configuration details for the job and cardinality + // + // Make an estimation of the memory usage for an anomaly detection job model. + // The estimate is based on analysis configuration details for the job and + // cardinality // estimates for the fields it references. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-apis.html + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-estimate-model-memory.html EstimateModelMemory ml_estimate_model_memory.NewEstimateModelMemory // Evaluate data frame analytics. + // // The API packages together commonly used evaluation metrics for various types // of machine learning features. This has been designed for use on indexes // created by data frame analytics. Evaluation requires both a ground truth @@ -1885,6 +5343,7 @@ type Ml struct { // https://www.elastic.co/guide/en/elasticsearch/reference/current/evaluate-dfanalytics.html EvaluateDataFrame ml_evaluate_data_frame.NewEvaluateDataFrame // Explain data frame analytics config. + // // This API provides explanations for a data frame analytics config that either // exists already or one that has not been created yet. 
The following // explanations are provided: @@ -1893,9 +5352,9 @@ type Ml struct { // deciding the appropriate value for model_memory_limit setting later on. // If you have object fields or fields that are excluded via source filtering, // they are not included in the explanation. - // http://www.elastic.co/guide/en/elasticsearch/reference/current/explain-dfanalytics.html + // https://www.elastic.co/guide/en/elasticsearch/reference/current/explain-dfanalytics.html ExplainDataFrameAnalytics ml_explain_data_frame_analytics.NewExplainDataFrameAnalytics - // Forces any buffered data to be processed by the job. + // Force buffered data to be processed. // The flush jobs API is only applicable when sending data for analysis using // the post data API. Depending on the content of the buffer, then it might // additionally calculate new results. Both flush and close operations are @@ -1906,37 +5365,37 @@ type Ml struct { // analyzing further data. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html FlushJob ml_flush_job.NewFlushJob - // Predicts the future behavior of a time series by using its historical - // behavior. + // Predict future behavior of a time series. // // Forecasts are not supported for jobs that perform population analysis; an // error occurs if you try to create a forecast for a job that has an - // `over_field_name` in its configuration. + // `over_field_name` in its configuration. Forecasts predict future behavior + // based on historical data. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-forecast.html Forecast ml_forecast.NewForecast - // Retrieves anomaly detection job results for one or more buckets. + // Get anomaly detection job results for buckets. // The API presents a chronological view of the records, grouped by bucket. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html GetBuckets ml_get_buckets.NewGetBuckets - // Retrieves information about the scheduled events in calendars. + // Get info about events in calendars. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar-event.html GetCalendarEvents ml_get_calendar_events.NewGetCalendarEvents - // Retrieves configuration information for calendars. + // Get calendar configuration info. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar.html GetCalendars ml_get_calendars.NewGetCalendars - // Retrieves anomaly detection job results for one or more categories. + // Get anomaly detection job results for categories. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html GetCategories ml_get_categories.NewGetCategories - // Retrieves configuration information for data frame analytics jobs. + // Get data frame analytics job configuration info. // You can get information for multiple data frame analytics jobs in a single // API request by using a comma-separated list of data frame analytics jobs or a // wildcard expression. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics.html GetDataFrameAnalytics ml_get_data_frame_analytics.NewGetDataFrameAnalytics - // Retrieves usage information for data frame analytics jobs. + // Get data frame analytics jobs usage info. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics-stats.html GetDataFrameAnalyticsStats ml_get_data_frame_analytics_stats.NewGetDataFrameAnalyticsStats - // Retrieves usage information for datafeeds. + // Get datafeeds usage info. // You can get statistics for multiple datafeeds in a single API request by // using a comma-separated list of datafeeds or a wildcard expression. 
You can // get statistics for all datafeeds by using `_all`, by specifying `*` as the @@ -1945,7 +5404,7 @@ type Ml struct { // This API returns a maximum of 10,000 datafeeds. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html GetDatafeedStats ml_get_datafeed_stats.NewGetDatafeedStats - // Retrieves configuration information for datafeeds. + // Get datafeeds configuration info. // You can get information for multiple datafeeds in a single API request by // using a comma-separated list of datafeeds or a wildcard expression. You can // get information for all datafeeds by using `_all`, by specifying `*` as the @@ -1953,39 +5412,41 @@ type Ml struct { // This API returns a maximum of 10,000 datafeeds. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html GetDatafeeds ml_get_datafeeds.NewGetDatafeeds - // Retrieves filters. + // Get filters. // You can get a single filter or all filters. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-filter.html GetFilters ml_get_filters.NewGetFilters - // Retrieves anomaly detection job results for one or more influencers. + // Get anomaly detection job results for influencers. // Influencers are the entities that have contributed to, or are to blame for, // the anomalies. Influencer results are available only if an // `influencer_field_name` is specified in the job configuration. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html GetInfluencers ml_get_influencers.NewGetInfluencers - // Retrieves usage information for anomaly detection jobs. + // Get anomaly detection jobs usage info. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html GetJobStats ml_get_job_stats.NewGetJobStats - // Retrieves configuration information for anomaly detection jobs. + // Get anomaly detection jobs configuration info. 
// You can get information for multiple anomaly detection jobs in a single API // request by using a group name, a comma-separated list of jobs, or a wildcard // expression. You can get information for all anomaly detection jobs by using // `_all`, by specifying `*` as the ``, or by omitting the ``. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html GetJobs ml_get_jobs.NewGetJobs + // Get machine learning memory usage info. // Get information about how machine learning jobs and trained models are using // memory, // on each node, both within the JVM heap, and natively, outside of the JVM. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-memory.html GetMemoryStats ml_get_memory_stats.NewGetMemoryStats - // Retrieves usage information for anomaly detection job model snapshot - // upgrades. + // Get anomaly detection job model snapshot upgrade usage info. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-model-snapshot-upgrade-stats.html GetModelSnapshotUpgradeStats ml_get_model_snapshot_upgrade_stats.NewGetModelSnapshotUpgradeStats - // Retrieves information about model snapshots. + // Get model snapshots info. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html GetModelSnapshots ml_get_model_snapshots.NewGetModelSnapshots - // Retrieves overall bucket results that summarize the bucket results of + // Get overall bucket results. + // + // Retrieves overall bucket results that summarize the bucket results of // multiple anomaly detection jobs. // // The `overall_score` is calculated by combining the scores of all the @@ -2004,7 +5465,7 @@ type Ml struct { // jobs' largest bucket span. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-overall-buckets.html GetOverallBuckets ml_get_overall_buckets.NewGetOverallBuckets - // Retrieves anomaly records for an anomaly detection job. 
+ // Get anomaly records for an anomaly detection job. // Records contain the detailed analytical results. They describe the anomalous // activity that has been identified in the input data based on the detector // configuration. @@ -2017,19 +5478,20 @@ type Ml struct { // number of detectors. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html GetRecords ml_get_records.NewGetRecords - // Retrieves configuration information for a trained model. + // Get trained model configuration info. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models.html GetTrainedModels ml_get_trained_models.NewGetTrainedModels - // Retrieves usage information for trained models. You can get usage information - // for multiple trained + // Get trained models usage info. + // You can get usage information for multiple trained // models in a single API request by using a comma-separated list of model IDs // or a wildcard expression. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models-stats.html GetTrainedModelsStats ml_get_trained_models_stats.NewGetTrainedModelsStats - // Evaluates a trained model. + // Evaluate a trained model. // https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html InferTrainedModel ml_infer_trained_model.NewInferTrainedModel - // Returns defaults and limits used by machine learning. + // Get machine learning information. + // Get defaults and limits used by machine learning. // This endpoint is designed to be used by a user interface that needs to fully // understand machine learning configurations where some options are not // specified, meaning that the defaults should be used. This endpoint may be @@ -2039,19 +5501,19 @@ type Ml struct { // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-info.html Info ml_info.NewInfo // Open anomaly detection jobs. 
- // An anomaly detection job must be opened in order for it to be ready to - // receive and analyze data. It can be opened and closed multiple times - // throughout its lifecycle. + // + // An anomaly detection job must be opened to be ready to receive and analyze + // data. It can be opened and closed multiple times throughout its lifecycle. // When you open a new job, it starts with an empty model. // When you open an existing job, the most recent model state is automatically // loaded. The job is ready to resume its analysis from where it left off, once // new data is received. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html OpenJob ml_open_job.NewOpenJob - // Adds scheduled events to a calendar. + // Add scheduled events to the calendar. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-calendar-event.html PostCalendarEvents ml_post_calendar_events.NewPostCalendarEvents - // Sends data to an anomaly detection job for analysis. + // Send data to an anomaly detection job for analysis. // // IMPORTANT: For each job, data can be accepted from only a single connection // at a time. @@ -2059,10 +5521,11 @@ type Ml struct { // a comma-separated list. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html PostData ml_post_data.NewPostData - // Previews the extracted features used by a data frame analytics config. - // http://www.elastic.co/guide/en/elasticsearch/reference/current/preview-dfanalytics.html + // Preview features used by data frame analytics. + // Preview the extracted features used by a data frame analytics config. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-dfanalytics.html PreviewDataFrameAnalytics ml_preview_data_frame_analytics.NewPreviewDataFrameAnalytics - // Previews a datafeed. + // Preview a datafeed. // This API returns the first "page" of search results from a datafeed. 
// You can preview an existing datafeed or provide configuration details for a // datafeed @@ -2078,24 +5541,36 @@ type Ml struct { // You can also use secondary authorization headers to supply the credentials. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html PreviewDatafeed ml_preview_datafeed.NewPreviewDatafeed - // Creates a calendar. + // Create a calendar. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar.html PutCalendar ml_put_calendar.NewPutCalendar - // Adds an anomaly detection job to a calendar. + // Add anomaly detection job to calendar. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar-job.html PutCalendarJob ml_put_calendar_job.NewPutCalendarJob - // Instantiates a data frame analytics job. + // Create a data frame analytics job. // This API creates a data frame analytics job that performs an analysis on the // source indices and stores the outcome in a destination index. + // By default, the query used in the source configuration is `{"match_all": + // {}}`. + // + // If the destination index does not exist, it is created automatically when you + // start the job. + // + // If you supply only a subset of the regression or classification parameters, + // hyperparameter optimization occurs. It determines a value for each of the + // undefined parameters. // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-dfanalytics.html PutDataFrameAnalytics ml_put_data_frame_analytics.NewPutDataFrameAnalytics - // Instantiates a datafeed. + // Create a datafeed. // Datafeeds retrieve data from Elasticsearch for analysis by an anomaly // detection job. // You can associate only one datafeed with each anomaly detection job. // The datafeed contains a query that runs at a defined interval (`frequency`). // If you are concerned about delayed data, you can add a delay (`query_delay') // at each interval. 
+ // By default, the datafeed uses the following query: `{"match_all": {"boost": + // 1}}`. + // // When Elasticsearch security features are enabled, your datafeed remembers // which roles the user who created it had // at the time of creation and runs the query using those same roles. If you @@ -2107,7 +5582,7 @@ type Ml struct { // the `.ml-config` index. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html PutDatafeed ml_put_datafeed.NewPutDatafeed - // Instantiates a filter. + // Create a filter. // A filter contains a list of strings. It can be used by one or more anomaly // detection jobs. // Specifically, filters are referenced in the `custom_rules` property of @@ -2115,16 +5590,21 @@ type Ml struct { // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-filter.html PutFilter ml_put_filter.NewPutFilter // Create an anomaly detection job. + // // If you include a `datafeed_config`, you must have read index privileges on // the source index. + // If you include a `datafeed_config` but do not provide a query, the datafeed + // uses `{"match_all": {"boost": 1}}`. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html PutJob ml_put_job.NewPutJob - // Enables you to supply a trained model that is not created by data frame + // Create a trained model. + // Enable you to supply a trained model that is not created by data frame // analytics. // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models.html PutTrainedModel ml_put_trained_model.NewPutTrainedModel - // Creates or updates a trained model alias. A trained model alias is a logical - // name used to reference a single trained model. + // Create or update a trained model alias. + // A trained model alias is a logical name used to reference a single trained + // model. // You can use aliases instead of trained model identifiers to make it easier to // reference your models. 
For example, you can use aliases in inference // aggregations and processors. @@ -2141,23 +5621,23 @@ type Ml struct { // returns a warning. // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models-aliases.html PutTrainedModelAlias ml_put_trained_model_alias.NewPutTrainedModelAlias - // Creates part of a trained model definition. + // Create part of a trained model definition. // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-definition-part.html PutTrainedModelDefinitionPart ml_put_trained_model_definition_part.NewPutTrainedModelDefinitionPart - // Creates a trained model vocabulary. + // Create a trained model vocabulary. // This API is supported only for natural language processing (NLP) models. // The vocabulary is stored in the index as described in // `inference_config.*.vocabulary` of the trained model definition. // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-vocabulary.html PutTrainedModelVocabulary ml_put_trained_model_vocabulary.NewPutTrainedModelVocabulary - // Resets an anomaly detection job. + // Reset an anomaly detection job. // All model state and results are deleted. The job is ready to start over as if // it had just been created. // It is not currently possible to reset multiple jobs using wildcards or a // comma separated list. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-reset-job.html ResetJob ml_reset_job.NewResetJob - // Reverts to a specific snapshot. + // Revert to a snapshot. // The machine learning features react quickly to anomalous input, learning new // behaviors in data. Highly anomalous input increases the variance in the // models whilst the system learns whether this is a new step-change in behavior @@ -2167,6 +5647,7 @@ type Ml struct { // snapshot after Black Friday or a critical system failure. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html RevertModelSnapshot ml_revert_model_snapshot.NewRevertModelSnapshot + // Set upgrade_mode for ML indices. // Sets a cluster wide upgrade_mode setting that prepares machine learning // indices for an upgrade. // When upgrading your cluster, in some circumstances you must restart your @@ -2181,7 +5662,7 @@ type Ml struct { // machine learning info API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-set-upgrade-mode.html SetUpgradeMode ml_set_upgrade_mode.NewSetUpgradeMode - // Starts a data frame analytics job. + // Start a data frame analytics job. // A data frame analytics job can be started and stopped multiple times // throughout its lifecycle. // If the destination index does not exist, it is created automatically the @@ -2195,7 +5676,7 @@ type Ml struct { // the destination index in advance with custom settings and mappings. // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-dfanalytics.html StartDataFrameAnalytics ml_start_data_frame_analytics.NewStartDataFrameAnalytics - // Starts one or more datafeeds. + // Start datafeeds. // // A datafeed must be started in order to retrieve data from Elasticsearch. A // datafeed can be started and stopped @@ -2217,28 +5698,28 @@ type Ml struct { // credentials are used instead. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html StartDatafeed ml_start_datafeed.NewStartDatafeed - // Starts a trained model deployment, which allocates the model to every machine - // learning node. + // Start a trained model deployment. + // It allocates the model to every machine learning node. // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trained-model-deployment.html StartTrainedModelDeployment ml_start_trained_model_deployment.NewStartTrainedModelDeployment - // Stops one or more data frame analytics jobs. 
+ // Stop data frame analytics jobs. // A data frame analytics job can be started and stopped multiple times // throughout its lifecycle. // https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-dfanalytics.html StopDataFrameAnalytics ml_stop_data_frame_analytics.NewStopDataFrameAnalytics - // Stops one or more datafeeds. + // Stop datafeeds. // A datafeed that is stopped ceases to retrieve data from Elasticsearch. A // datafeed can be started and stopped // multiple times throughout its lifecycle. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html StopDatafeed ml_stop_datafeed.NewStopDatafeed - // Stops a trained model deployment. + // Stop a trained model deployment. // https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-trained-model-deployment.html StopTrainedModelDeployment ml_stop_trained_model_deployment.NewStopTrainedModelDeployment - // Updates an existing data frame analytics job. + // Update a data frame analytics job. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-dfanalytics.html UpdateDataFrameAnalytics ml_update_data_frame_analytics.NewUpdateDataFrameAnalytics - // Updates the properties of a datafeed. + // Update a datafeed. // You must stop and start the datafeed for the changes to be applied. // When Elasticsearch security features are enabled, your datafeed remembers // which roles the user who updated it had at @@ -2247,21 +5728,24 @@ type Ml struct { // those credentials are used instead. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html UpdateDatafeed ml_update_datafeed.NewUpdateDatafeed + // Update a filter. // Updates the description of a filter, adds items, or removes items from the // list. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-filter.html UpdateFilter ml_update_filter.NewUpdateFilter + // Update an anomaly detection job. 
// Updates certain properties of an anomaly detection job. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html UpdateJob ml_update_job.NewUpdateJob + // Update a snapshot. // Updates certain properties of a snapshot. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html UpdateModelSnapshot ml_update_model_snapshot.NewUpdateModelSnapshot - // Starts a trained model deployment, which allocates the model to every machine - // learning node. + // Update a trained model deployment. // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-trained-model-deployment.html UpdateTrainedModelDeployment ml_update_trained_model_deployment.NewUpdateTrainedModelDeployment - // Upgrades an anomaly detection model snapshot to the latest major version. + // Upgrade a snapshot. + // Upgrade an anomaly detection model snapshot to the latest major version. // Over time, older snapshot formats are deprecated and removed. Anomaly // detection jobs support only snapshots that are from the current or previous // major version. @@ -2272,49 +5756,74 @@ type Ml struct { // job. // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-upgrade-job-model-snapshot.html UpgradeJobSnapshot ml_upgrade_job_snapshot.NewUpgradeJobSnapshot - // Validates an anomaly detection job. + // Validate an anomaly detection job. // https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html Validate ml_validate.NewValidate - // Validates an anomaly detection detector. - // https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html + // Validate an anomaly detection job. + // https://www.elastic.co/docs/api/doc/elasticsearch/v8 ValidateDetector ml_validate_detector.NewValidateDetector } type Monitoring struct { - // Used by the monitoring features to send monitoring data. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/monitor-elasticsearch-cluster.html + // Send monitoring data. + // This API is used by the monitoring features to send monitoring data. + // https://www.elastic.co/docs/api/doc/elasticsearch/v8 Bulk monitoring_bulk.NewBulk } type Nodes struct { - // You can use this API to clear the archived repositories metering information - // in the cluster. + // Clear the archived repositories metering. + // Clear the archived repositories metering information in the cluster. // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-repositories-metering-archive-api.html ClearRepositoriesMeteringArchive nodes_clear_repositories_metering_archive.NewClearRepositoriesMeteringArchive - // You can use the cluster repositories metering API to retrieve repositories - // metering information in a cluster. - // This API exposes monotonically non-decreasing counters and it’s expected that - // clients would durably store the - // information needed to compute aggregations over a period of time. - // Additionally, the information exposed by this - // API is volatile, meaning that it won’t be present after node restarts. + // Get cluster repositories metering. + // Get repositories metering information for a cluster. + // This API exposes monotonically non-decreasing counters and it is expected + // that clients would durably store the information needed to compute + // aggregations over a period of time. + // Additionally, the information exposed by this API is volatile, meaning that + // it will not be present after node restarts. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html GetRepositoriesMeteringInfo nodes_get_repositories_metering_info.NewGetRepositoriesMeteringInfo - // This API yields a breakdown of the hot threads on each selected node in the - // cluster. - // The output is plain text with a breakdown of each node’s top hot threads. 
+ // Get the hot threads for nodes. + // Get a breakdown of the hot threads on each selected node in the cluster. + // The output is plain text with a breakdown of the top hot threads for each + // node. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-hot-threads.html HotThreads nodes_hot_threads.NewHotThreads - // Returns cluster nodes information. + // Get node information. + // + // By default, the API returns all attributes and core settings for cluster + // nodes. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-info.html Info nodes_info.NewInfo - // Reloads the keystore on nodes in the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-settings.html#reloadable-secure-settings + // Reload the keystore on nodes in the cluster. + // + // Secure settings are stored in an on-disk keystore. Certain of these settings + // are reloadable. + // That is, you can change them on disk and reload them without restarting any + // nodes in the cluster. + // When you have updated reloadable secure settings in your keystore, you can + // use this API to reload those settings on each node. + // + // When the Elasticsearch keystore is password protected and not simply + // obfuscated, you must provide the password for the keystore when you reload + // the secure settings. + // Reloading the settings for the whole cluster assumes that the keystores for + // all nodes are protected with the same password; this method is allowed only + // when inter-node communications are encrypted. + // Alternatively, you can reload the secure settings on each node by locally + // accessing the API and passing the node-specific Elasticsearch keystore + // password. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-reload-secure-settings.html ReloadSecureSettings nodes_reload_secure_settings.NewReloadSecureSettings - // Returns cluster nodes statistics. 
+ // Get node statistics. + // Get statistics for nodes in a cluster. + // By default, all stats are returned. You can limit the returned information by + // using metrics. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html Stats nodes_stats.NewStats - // Returns information on the usage of features. + // Get feature usage information. // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-usage.html Usage nodes_usage.NewUsage } @@ -2336,105 +5845,322 @@ type Profiling struct { } type QueryRules struct { - // Deletes a query rule within a query ruleset. + // Delete a query rule. + // Delete a query rule within a query ruleset. + // This is a destructive action that is only recoverable by re-adding the same + // rule with the create or update query rule API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-query-rule.html DeleteRule query_rules_delete_rule.NewDeleteRule - // Deletes a query ruleset. + // Delete a query ruleset. + // Remove a query ruleset and its associated data. + // This is a destructive action that is not recoverable. // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-query-ruleset.html DeleteRuleset query_rules_delete_ruleset.NewDeleteRuleset - // Returns the details about a query rule within a query ruleset + // Get a query rule. + // Get details about a query rule within a query ruleset. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-query-rule.html GetRule query_rules_get_rule.NewGetRule - // Returns the details about a query ruleset + // Get a query ruleset. + // Get details about a query ruleset. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-query-ruleset.html GetRuleset query_rules_get_ruleset.NewGetRuleset - // Returns summarized information about existing query rulesets. + // Get all query rulesets. + // Get summarized information about the query rulesets. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/list-query-rulesets.html ListRulesets query_rules_list_rulesets.NewListRulesets - // Creates or updates a query rule within a query ruleset. + // Create or update a query rule. + // Create or update a query rule within a query ruleset. + // + // IMPORTANT: Due to limitations within pinned queries, you can only pin + // documents using ids or docs, but cannot use both in single rule. + // It is advised to use one or the other in query rulesets, to avoid errors. + // Additionally, pinned queries have a maximum limit of 100 pinned hits. + // If multiple matching rules pin more than 100 documents, only the first 100 + // documents are pinned in the order they are specified in the ruleset. // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-query-rule.html PutRule query_rules_put_rule.NewPutRule - // Creates or updates a query ruleset. + // Create or update a query ruleset. + // There is a limit of 100 rules per ruleset. + // This limit can be increased by using the + // `xpack.applications.rules.max_rules_per_ruleset` cluster setting. + // + // IMPORTANT: Due to limitations within pinned queries, you can only select + // documents using `ids` or `docs`, but cannot use both in single rule. + // It is advised to use one or the other in query rulesets, to avoid errors. + // Additionally, pinned queries have a maximum limit of 100 pinned hits. + // If multiple matching rules pin more than 100 documents, only the first 100 + // documents are pinned in the order they are specified in the ruleset. // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-query-ruleset.html PutRuleset query_rules_put_ruleset.NewPutRuleset + // Test a query ruleset. + // Evaluate match criteria against a query ruleset to identify the rules that + // would match that criteria. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/test-query-ruleset.html + Test query_rules_test.NewTest } type Rollup struct { - // Deletes an existing rollup job. + // Delete a rollup job. + // + // A job must be stopped before it can be deleted. + // If you attempt to delete a started job, an error occurs. + // Similarly, if you attempt to delete a nonexistent job, an exception occurs. + // + // IMPORTANT: When you delete a job, you remove only the process that is + // actively monitoring and rolling up data. + // The API does not delete any previously rolled up data. + // This is by design; a user may wish to roll up a static data set. + // Because the data set is static, after it has been fully rolled up there is no + // need to keep the indexing rollup job around (as there will be no new data). + // Thus the job can be deleted, leaving behind the rolled up data for analysis. + // If you wish to also remove the rollup data and the rollup index contains the + // data for only a single job, you can delete the whole rollup index. + // If the rollup index stores data from several jobs, you must issue a + // delete-by-query that targets the rollup job's identifier in the rollup index. + // For example: + // + // ``` + // POST my_rollup_index/_delete_by_query + // { + // "query": { + // "term": { + // "_rollup.id": "the_rollup_job_id" + // } + // } + // } + // ``` // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-delete-job.html DeleteJob rollup_delete_job.NewDeleteJob - // Retrieves the configuration, stats, and status of rollup jobs. + // Get rollup job information. + // Get the configuration, stats, and status of rollup jobs. + // + // NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. + // If a job was created, ran for a while, then was deleted, the API does not + // return any details about it. + // For details about a historical rollup job, the rollup capabilities API may be + // more useful. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-job.html GetJobs rollup_get_jobs.NewGetJobs - // Returns the capabilities of any rollup jobs that have been configured for a + // Get the rollup job capabilities. + // Get the capabilities of any rollup jobs that have been configured for a // specific index or index pattern. + // + // This API is useful because a rollup job is often configured to rollup only a + // subset of fields from the source index. + // Furthermore, only certain aggregations can be configured for various fields, + // leading to a limited subset of functionality depending on that configuration. + // This API enables you to inspect an index and determine: + // + // 1. Does this index have associated rollup data somewhere in the cluster? + // 2. If yes to the first question, what fields were rolled up, what + // aggregations can be performed, and where does the data live? // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-caps.html GetRollupCaps rollup_get_rollup_caps.NewGetRollupCaps - // Returns the rollup capabilities of all jobs inside of a rollup index (for - // example, the index where rollup data is stored). + // Get the rollup index capabilities. + // Get the rollup capabilities of all jobs inside of a rollup index. + // A single rollup index may store the data for multiple rollup jobs and may + // have a variety of capabilities depending on those jobs. This API enables you + // to determine: + // + // * What jobs are stored in an index (or indices specified via a pattern)? + // * What target indices were rolled up, what fields were used in those rollups, + // and what aggregations can be performed on each job? // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-index-caps.html GetRollupIndexCaps rollup_get_rollup_index_caps.NewGetRollupIndexCaps - // Creates a rollup job. + // Create a rollup job. 
+ // + // WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will + // fail with a message about the deprecation and planned removal of rollup + // features. A cluster needs to contain either a rollup job or a rollup index in + // order for this API to be allowed to run. + // + // The rollup job configuration contains all the details about how the job + // should run, when it indexes documents, and what future queries will be able + // to run against the rollup index. + // + // There are three main sections to the job configuration: the logistical + // details about the job (for example, the cron schedule), the fields that are + // used for grouping, and what metrics to collect for each group. + // + // Jobs are created in a `STOPPED` state. You can start them with the start + // rollup jobs API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-put-job.html PutJob rollup_put_job.NewPutJob - // Enables searching rolled-up data using the standard Query DSL. + // Search rolled-up data. + // The rollup search endpoint is needed because, internally, rolled-up documents + // utilize a different document structure than the original data. + // It rewrites standard Query DSL into a format that matches the rollup + // documents then takes the response and rewrites it back to what a client would + // expect given the original query. + // + // The request body supports a subset of features from the regular search API. + // The following functionality is not available: + // + // `size`: Because rollups work on pre-aggregated data, no search hits can be + // returned and so size must be set to zero or omitted entirely. + // `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are + // similarly disallowed. + // + // **Searching both historical rollup and non-rollup data** + // + // The rollup search API has the capability to search across both "live" + // non-rollup data and the aggregated rollup data. 
+ // This is done by simply adding the live indices to the URI. For example: + // + // ``` + // GET sensor-1,sensor_rollup/_rollup_search + // { + // "size": 0, + // "aggregations": { + // "max_temperature": { + // "max": { + // "field": "temperature" + // } + // } + // } + // } + // ``` + // + // The rollup search endpoint does two things when the search runs: + // + // * The original request is sent to the non-rollup index unaltered. + // * A rewritten version of the original request is sent to the rollup index. + // + // When the two responses are received, the endpoint rewrites the rollup + // response and merges the two together. + // During the merging process, if there is any overlap in buckets between the + // two responses, the buckets from the non-rollup index are used. // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-search.html RollupSearch rollup_rollup_search.NewRollupSearch - // Starts an existing, stopped rollup job. + // Start rollup jobs. + // If you try to start a job that does not exist, an exception occurs. + // If you try to start a job that is already started, nothing happens. // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-start-job.html StartJob rollup_start_job.NewStartJob - // Stops an existing, started rollup job. + // Stop rollup jobs. + // If you try to stop a job that does not exist, an exception occurs. + // If you try to stop a job that is already stopped, nothing happens. + // + // Since only a stopped job can be deleted, it can be useful to block the API + // until the indexer has fully stopped. + // This is accomplished with the `wait_for_completion` query parameter, and + // optionally a timeout. For example: + // + // ``` + // POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s + // ``` + // The parameter blocks the API call from returning until either the job has + // moved to STOPPED or the specified time has elapsed. 
+ // If the specified time elapses without the job moving to STOPPED, a timeout + // exception occurs. // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-stop-job.html StopJob rollup_stop_job.NewStopJob } type SearchApplication struct { - // Deletes a search application. + // Delete a search application. + // + // Remove a search application and its associated alias. Indices attached to the + // search application are not removed. // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-search-application.html Delete search_application_delete.NewDelete // Delete a behavioral analytics collection. + // The associated data stream is also deleted. // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-analytics-collection.html DeleteBehavioralAnalytics search_application_delete_behavioral_analytics.NewDeleteBehavioralAnalytics - // Returns the details about a search application + // Get search application details. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-search-application.html Get search_application_get.NewGet - // Returns the existing behavioral analytics collections. + // Get behavioral analytics collections. // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-analytics-collection.html GetBehavioralAnalytics search_application_get_behavioral_analytics.NewGetBehavioralAnalytics - // Returns the existing search applications. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-search-applications.html + // Get search applications. + // Get information about search applications. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-analytics-collection.html List search_application_list.NewList - // Creates or updates a search application. + // Create a behavioral analytics collection event. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/post-analytics-collection-event.html
+ PostBehavioralAnalyticsEvent search_application_post_behavioral_analytics_event.NewPostBehavioralAnalyticsEvent
+ // Create or update a search application.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-search-application.html
 Put search_application_put.NewPut
- // Creates a behavioral analytics collection.
+ // Create a behavioral analytics collection.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-analytics-collection.html
 PutBehavioralAnalytics search_application_put_behavioral_analytics.NewPutBehavioralAnalytics
- // Perform a search against a search application.
+ // Render a search application query.
+ // Generate an Elasticsearch query using the specified query parameters and the
+ // search template associated with the search application or a default template
+ // if none is specified.
+ // If a parameter used in the search template is not specified in `params`, the
+ // parameter's default value will be used.
+ // The API returns the specific Elasticsearch query that would be generated and
+ // run by calling the search application search API.
+ //
+ // You must have `read` privileges on the backing alias of the search
+ // application.
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-application-render-query.html
+ RenderQuery search_application_render_query.NewRenderQuery
+ // Run a search application search.
+ // Generate and run an Elasticsearch query that uses the specified query
+ // parameter and the search template associated with the search application or
+ // default template.
+ // Unspecified template parameters are assigned their default values if
+ // applicable.
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-application-search.html Search search_application_search.NewSearch } type SearchableSnapshots struct { - // Retrieve node-level cache statistics about searchable snapshots. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html + // Get cache statistics. + // Get statistics about the shared cache for partially mounted indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-cache-stats.html CacheStats searchable_snapshots_cache_stats.NewCacheStats - // Clear the cache of searchable snapshots. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html + // Clear the cache. + // Clear indices and data streams from the shared cache for partially mounted + // indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-clear-cache.html ClearCache searchable_snapshots_clear_cache.NewClearCache - // Mount a snapshot as a searchable index. + // Mount a snapshot. + // Mount a snapshot as a searchable snapshot index. + // Do not use this API for snapshots managed by index lifecycle management + // (ILM). + // Manually mounting ILM-managed snapshots can interfere with ILM processes. // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-mount-snapshot.html Mount searchable_snapshots_mount.NewMount - // Retrieve shard-level statistics about searchable snapshots. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html + // Get searchable snapshot statistics. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-stats.html Stats searchable_snapshots_stats.NewStats } type Security struct { - // Creates or updates a user profile on behalf of another user. + // Activate a user profile. 
+ // + // Create or update a user profile on behalf of another user. + // + // NOTE: The user profile feature is designed only for use by Kibana and + // Elastic's Observability, Enterprise Search, and Elastic Security solutions. + // Individual users and external applications should not call this API directly. + // The calling application must have either an `access_token` or a combination + // of `username` and `password` for the user that the profile document is + // intended for. + // Elastic reserves the right to change or remove this feature in future + // releases without prior notice. + // + // This API creates or updates a profile document for end users with information + // that is extracted from the user's authentication object including `username`, + // `full_name,` `roles`, and the authentication realm. + // For example, in the JWT `access_token` case, the profile user's `username` is + // extracted from the JWT token claim pointed to by the `claims.principal` + // setting of the JWT realm that authenticated the token. + // + // When updating a profile document, the API enables the document if it was + // disabled. + // Any updates do not change existing content for either the `labels` or `data` + // fields. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-activate-user-profile.html ActivateUserProfile security_activate_user_profile.NewActivateUserProfile // Authenticate a user. + // // Authenticates a user and returns information about the authenticated user. // Include the user information in a [basic auth // header](https://en.wikipedia.org/wiki/Basic_access_authentication). @@ -2445,94 +6171,294 @@ type Security struct { // If the user cannot be authenticated, this API returns a 401 status code. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-authenticate.html Authenticate security_authenticate.NewAuthenticate + // Bulk delete roles. 
+ // // The role management APIs are generally the preferred way to manage roles, // rather than using file-based role management. // The bulk delete roles API cannot delete roles that are defined in roles // files. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-delete-role.html BulkDeleteRole security_bulk_delete_role.NewBulkDeleteRole + // Bulk create or update roles. + // // The role management APIs are generally the preferred way to manage roles, // rather than using file-based role management. // The bulk create or update roles API cannot update roles that are defined in // roles files. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-put-role.html BulkPutRole security_bulk_put_role.NewBulkPutRole - // Updates the attributes of multiple existing API keys. + // Bulk update API keys. + // Update the attributes for multiple API keys. + // + // IMPORTANT: It is not possible to use an API key as the authentication + // credential for this API. To update API keys, the owner user's credentials are + // required. + // + // This API is similar to the update API key API but enables you to apply the + // same update to multiple API keys in one API call. This operation can greatly + // improve performance over making individual updates. + // + // It is not possible to update expired or invalidated API keys. + // + // This API supports updates to API key access scope, metadata and expiration. + // The access scope of each API key is derived from the `role_descriptors` you + // specify in the request and a snapshot of the owner user's permissions at the + // time of the request. + // The snapshot of the owner's permissions is updated automatically on every + // call. + // + // IMPORTANT: If you don't specify `role_descriptors` in the request, a call to + // this API might still change an API key's access scope. 
This change can occur + // if the owner user's permissions have changed since the API key was created or + // last modified. + // + // A successful request returns a JSON structure that contains the IDs of all + // updated API keys, the IDs of API keys that already had the requested changes + // and did not require an update, and error details for any failed update. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-update-api-keys.html BulkUpdateApiKeys security_bulk_update_api_keys.NewBulkUpdateApiKeys - // Changes the passwords of users in the native realm and built-in users. + // Change passwords. + // + // Change the passwords of users in the native realm and built-in users. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html ChangePassword security_change_password.NewChangePassword - // Evicts a subset of all entries from the API key cache. + // Clear the API key cache. + // + // Evict a subset of all entries from the API key cache. // The cache is also automatically cleared on state changes of the security // index. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-api-key-cache.html ClearApiKeyCache security_clear_api_key_cache.NewClearApiKeyCache - // Evicts application privileges from the native application privileges cache. + // Clear the privileges cache. + // + // Evict privileges from the native application privilege cache. + // The cache is also automatically cleared for applications that have their + // privileges updated. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-privilege-cache.html ClearCachedPrivileges security_clear_cached_privileges.NewClearCachedPrivileges - // Evicts users from the user cache. Can completely clear the cache or evict - // specific users. + // Clear the user cache. + // + // Evict users from the user cache. 
+ // You can completely clear the cache or evict specific users. + // + // User credentials are cached in memory on each node to avoid connecting to a + // remote authentication service or hitting the disk for every incoming request. + // There are realm settings that you can use to configure the user cache. + // For more information, refer to the documentation about controlling the user + // cache. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-cache.html ClearCachedRealms security_clear_cached_realms.NewClearCachedRealms - // Evicts roles from the native role cache. + // Clear the roles cache. + // + // Evict roles from the native role cache. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-role-cache.html ClearCachedRoles security_clear_cached_roles.NewClearCachedRoles - // Evicts tokens from the service account token caches. + // Clear service account token caches. + // + // Evict a subset of all entries from the service account token caches. + // Two separate caches exist for service account tokens: one cache for tokens + // backed by the `service_tokens` file, and another for tokens backed by the + // `.security` index. + // This API clears matching entries from both caches. + // + // The cache for service account tokens backed by the `.security` index is + // cleared automatically on state changes of the security index. + // The cache for tokens backed by the `service_tokens` file is cleared + // automatically on file changes. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-service-token-caches.html ClearCachedServiceTokens security_clear_cached_service_tokens.NewClearCachedServiceTokens // Create an API key. - // Creates an API key for access without requiring basic authentication. + // + // Create an API key for access without requiring basic authentication. 
+ // + // IMPORTANT: If the credential that is used to authenticate this request is an + // API key, the derived API key cannot have any privileges. + // If you specify privileges, the API returns an error. + // // A successful request returns a JSON structure that contains the API key, its // unique id, and its name. // If applicable, it also returns expiration information for the API key in // milliseconds. + // // NOTE: By default, API keys never expire. You can specify expiration // information when you create the API keys. + // + // The API keys are created by the Elasticsearch API key service, which is + // automatically enabled. + // To configure or turn off the API key service, refer to API key service + // setting documentation. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html CreateApiKey security_create_api_key.NewCreateApiKey - // Creates a cross-cluster API key for API key based remote cluster access. + // Create a cross-cluster API key. + // + // Create an API key of the `cross_cluster` type for the API key based remote + // cluster access. + // A `cross_cluster` API key cannot be used to authenticate through the REST + // interface. + // + // IMPORTANT: To authenticate this request you must use a credential that is not + // an API key. Even if you use an API key that has the required privilege, the + // API returns an error. + // + // Cross-cluster API keys are created by the Elasticsearch API key service, + // which is automatically enabled. + // + // NOTE: Unlike REST API keys, a cross-cluster API key does not capture + // permissions of the authenticated user. The API key’s effective permission is + // exactly as specified with the `access` property. + // + // A successful request returns a JSON structure that contains the API key, its + // unique ID, and its name. If applicable, it also returns expiration + // information for the API key in milliseconds. 
+ //
+ // By default, API keys never expire. You can specify expiration information
+ // when you create the API keys.
+ //
+ // Cross-cluster API keys can only be updated with the update cross-cluster API
+ // key API.
+ // Attempting to update them with the update REST API key API or the bulk update
+ // REST API keys API will result in an error.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-cross-cluster-api-key.html
 CreateCrossClusterApiKey security_create_cross_cluster_api_key.NewCreateCrossClusterApiKey
- // Creates a service accounts token for access without requiring basic
+ // Create a service account token.
+ //
+ // Create a service accounts token for access without requiring basic
 // authentication.
+ //
+ // NOTE: Service account tokens never expire.
+ // You must actively delete them if they are no longer needed.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-service-token.html
 CreateServiceToken security_create_service_token.NewCreateServiceToken
- // Removes application privileges.
+ // Delegate PKI authentication.
+ //
+ // This API implements the exchange of an X509Certificate chain for an
+ // Elasticsearch access token.
+ // The certificate chain is validated, according to RFC 5280, by sequentially
+ // considering the trust configuration of every installed PKI realm that has
+ // `delegation.enabled` set to `true`.
+ // A successfully trusted client certificate is also subject to the validation
+ // of the subject distinguished name according to the `username_pattern` of the
+ // respective realm.
+ //
+ // This API is called by smart and trusted proxies, such as Kibana, which
+ // terminate the user's TLS session but still want to authenticate the user by
+ // using a PKI realm—as if the user connected directly to Elasticsearch.
+ // + // IMPORTANT: The association between the subject public key in the target + // certificate and the corresponding private key is not validated. + // This is part of the TLS authentication process and it is delegated to the + // proxy that calls this API. + // The proxy is trusted to have performed the TLS authentication and this API + // translates that authentication into an Elasticsearch access token. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delegate-pki-authentication.html + DelegatePki security_delegate_pki.NewDelegatePki + // Delete application privileges. + // + // To use this API, you must have one of the following privileges: + // + // * The `manage_security` cluster privilege (or a greater privilege such as + // `all`). + // * The "Manage Application Privileges" global privilege for the application + // being referenced in the request. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-privilege.html DeletePrivileges security_delete_privileges.NewDeletePrivileges - // Removes roles in the native realm. + // Delete roles. + // + // Delete roles in the native realm. + // The role management APIs are generally the preferred way to manage roles, + // rather than using file-based role management. + // The delete roles API cannot remove roles that are defined in roles files. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role.html DeleteRole security_delete_role.NewDeleteRole - // Removes role mappings. + // Delete role mappings. + // + // Role mappings define which roles are assigned to each user. + // The role mapping APIs are generally the preferred way to manage role mappings + // rather than using role mapping files. + // The delete role mappings API cannot remove role mappings that are defined in + // role mapping files. 
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role-mapping.html
 DeleteRoleMapping security_delete_role_mapping.NewDeleteRoleMapping
- // Deletes a service account token.
+ // Delete service account tokens.
+ //
+ // Delete service account tokens for a service in a specified namespace.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-service-token.html
 DeleteServiceToken security_delete_service_token.NewDeleteServiceToken
- // Deletes users from the native realm.
+ // Delete users.
+ //
+ // Delete users from the native realm.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html
 DeleteUser security_delete_user.NewDeleteUser
- // Disables users in the native realm.
+ // Disable users.
+ //
+ // Disable users in the native realm.
+ // By default, when you create users, they are enabled.
+ // You can use this API to revoke a user's access to Elasticsearch.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html
 DisableUser security_disable_user.NewDisableUser
- // Disables a user profile so it's not visible in user profile searches.
+ // Disable a user profile.
+ //
+ // Disable user profiles so that they are not visible in user profile searches.
+ //
+ // NOTE: The user profile feature is designed only for use by Kibana and
+ // Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+ // Individual users and external applications should not call this API directly.
+ // Elastic reserves the right to change or remove this feature in future
+ // releases without prior notice.
+ //
+ // When you activate a user profile, it’s automatically enabled and visible in
+ // user profile searches. You can use the disable user profile API to disable a
+ // user profile so it’s not visible in these searches.
+ // To re-enable a disabled user profile, use the enable user profile API.
// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user-profile.html DisableUserProfile security_disable_user_profile.NewDisableUserProfile - // Enables users in the native realm. + // Enable users. + // + // Enable users in the native realm. + // By default, when you create users, they are enabled. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user.html EnableUser security_enable_user.NewEnableUser - // Enables a user profile so it's visible in user profile searches. + // Enable a user profile. + // + // Enable user profiles to make them visible in user profile searches. + // + // NOTE: The user profile feature is designed only for use by Kibana and + // Elastic's Observability, Enterprise Search, and Elastic Security solutions. + // Individual users and external applications should not call this API directly. + // Elastic reserves the right to change or remove this feature in future + // releases without prior notice. + // + // When you activate a user profile, it's automatically enabled and visible in + // user profile searches. + // If you later disable the user profile, you can use the enable user profile + // API to make the profile visible in these searches again. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user-profile.html EnableUserProfile security_enable_user_profile.NewEnableUserProfile - // Enables a Kibana instance to configure itself for communication with a - // secured Elasticsearch cluster. + // Enroll Kibana. + // + // Enable a Kibana instance to configure itself for communication with a secured + // Elasticsearch cluster. + // + // NOTE: This API is currently intended for internal use only by Kibana. + // Kibana uses this API internally to configure itself for communications with + // an Elasticsearch cluster that already has security features enabled. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-kibana-enrollment.html EnrollKibana security_enroll_kibana.NewEnrollKibana - // Allows a new node to join an existing cluster with security features enabled. + // Enroll a node. + // + // Enroll a new node to allow it to join an existing cluster with security + // features enabled. + // + // The response contains all the necessary information for the joining node to + // bootstrap discovery and security related settings so that it can successfully + // join the cluster. + // The response contains key and certificate material that allows the caller to + // generate valid signed certificates for the HTTP layer of all nodes in the + // cluster. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-node-enrollment.html EnrollNode security_enroll_node.NewEnrollNode // Get API key information. + // // Retrieves information for one or more API keys. // NOTE: If you have only the `manage_own_api_key` privilege, this API returns // only the API keys that you own. @@ -2541,51 +6467,140 @@ type Security struct { // ownership. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-api-key.html GetApiKey security_get_api_key.NewGetApiKey - // Retrieves the list of cluster privileges and index privileges that are - // available in this version of Elasticsearch. + // Get builtin privileges. + // + // Get the list of cluster privileges and index privileges that are available in + // this version of Elasticsearch. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-builtin-privileges.html GetBuiltinPrivileges security_get_builtin_privileges.NewGetBuiltinPrivileges - // Retrieves application privileges. + // Get application privileges. 
+ // + // To use this API, you must have one of the following privileges: + // + // * The `read_security` cluster privilege (or a greater privilege such as + // `manage_security` or `all`). + // * The "Manage Application Privileges" global privilege for the application + // being referenced in the request. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html GetPrivileges security_get_privileges.NewGetPrivileges + // Get roles. + // + // Get roles in the native realm. // The role management APIs are generally the preferred way to manage roles, // rather than using file-based role management. // The get roles API cannot retrieve roles that are defined in roles files. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role.html GetRole security_get_role.NewGetRole - // Retrieves role mappings. + // Get role mappings. + // + // Role mappings define which roles are assigned to each user. + // The role mapping APIs are generally the preferred way to manage role mappings + // rather than using role mapping files. + // The get role mappings API cannot retrieve role mappings that are defined in + // role mapping files. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html GetRoleMapping security_get_role_mapping.NewGetRoleMapping - // This API returns a list of service accounts that match the provided path - // parameter(s). + // Get service accounts. + // + // Get a list of service accounts that match the provided path parameters. + // + // NOTE: Currently, only the `elastic/fleet-server` service account is + // available. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-accounts.html GetServiceAccounts security_get_service_accounts.NewGetServiceAccounts - // Retrieves information of all service credentials for a service account. + // Get service account credentials. 
+	//
+	// To use this API, you must have at least the `read_security` cluster privilege
+	// (or a greater privilege such as `manage_service_account` or
+	// `manage_security`).
+	//
+	// The response includes service account tokens that were created with the
+	// create service account tokens API as well as file-backed tokens from all
+	// nodes of the cluster.
+	//
+	// NOTE: For tokens backed by the `service_tokens` file, the API collects them
+	// from all nodes of the cluster.
+	// Tokens with the same name from different nodes are assumed to be the same
+	// token and are only counted once towards the total number of service tokens.
	// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-credentials.html
	GetServiceCredentials security_get_service_credentials.NewGetServiceCredentials
-	// Retrieve settings for the security system indices
+	// Get security index settings.
+	//
+	// Get the user-configurable settings for the security internal index
+	// (`.security` and associated indices).
+	// Only a subset of the index settings — those that are user-configurable — will
+	// be shown.
+	// This includes:
+	//
+	// * `index.auto_expand_replicas`
+	// * `index.number_of_replicas`
	// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-settings.html
	GetSettings security_get_settings.NewGetSettings
-	// Creates a bearer token for access without requiring basic authentication.
+	// Get a token.
+	//
+	// Create a bearer token for access without requiring basic authentication.
+	// The tokens are created by the Elasticsearch Token Service, which is
+	// automatically enabled when you configure TLS on the HTTP interface.
+	// Alternatively, you can explicitly enable the
+	// `xpack.security.authc.token.enabled` setting.
+	// When you are running in production mode, a bootstrap check prevents you from
+	// enabling the token service unless you also enable TLS on the HTTP interface.
+ // + // The get token API takes the same parameters as a typical OAuth 2.0 token API + // except for the use of a JSON request body. + // + // A successful get token API call returns a JSON structure that contains the + // access token, the amount of time (seconds) that the token expires in, the + // type, and the scope if available. + // + // The tokens returned by the get token API have a finite period of time for + // which they are valid and after that time period, they can no longer be used. + // That time period is defined by the `xpack.security.authc.token.timeout` + // setting. + // If you want to invalidate a token immediately, you can do so by using the + // invalidate token API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-token.html GetToken security_get_token.NewGetToken - // Retrieves information about users in the native realm and built-in users. + // Get users. + // + // Get information about users in the native realm and built-in users. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html GetUser security_get_user.NewGetUser - // Retrieves security privileges for the logged in user. + // Get user privileges. + // + // Get the security privileges for the logged in user. + // All users can use this API, but only to determine their own privileges. + // To check the privileges of other users, you must use the run as feature. + // To check whether a user has a specific list of privileges, use the has + // privileges API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-privileges.html GetUserPrivileges security_get_user_privileges.NewGetUserPrivileges - // Retrieves a user's profile using the unique profile ID. + // Get a user profile. + // + // Get a user's profile using the unique profile ID. 
+ // + // NOTE: The user profile feature is designed only for use by Kibana and + // Elastic's Observability, Enterprise Search, and Elastic Security solutions. + // Individual users and external applications should not call this API directly. + // Elastic reserves the right to change or remove this feature in future + // releases without prior notice. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-profile.html GetUserProfile security_get_user_profile.NewGetUserProfile - // Creates an API key on behalf of another user. - // This API is similar to Create API keys, however it creates the API key for a - // user that is different than the user that runs the API. - // The caller must have authentication credentials (either an access token, or a - // username and password) for the user on whose behalf the API key will be - // created. - // It is not possible to use this API to create an API key without that user’s + // Grant an API key. + // + // Create an API key on behalf of another user. + // This API is similar to the create API keys API, however it creates the API + // key for a user that is different than the user that runs the API. + // The caller must have authentication credentials for the user on whose behalf + // the API key will be created. + // It is not possible to use this API to create an API key without that user's // credentials. + // The supported user authentication credential types are: + // + // * username and password + // * Elasticsearch access tokens + // * JWTs + // // The user, for whom the authentication credentials is provided, can optionally // "run as" (impersonate) another user. // In this case, the API key will be created on behalf of the impersonated user. 
@@ -2593,6 +6608,8 @@ type Security struct { // This API is intended be used by applications that need to create and manage // API keys for end users, but cannot guarantee that those users have permission // to create API keys on their own behalf. + // The API keys are created by the Elasticsearch API key service, which is + // automatically enabled. // // A successful grant API key API call returns a JSON structure that contains // the API key, its unique id, and its name. @@ -2604,315 +6621,1315 @@ type Security struct { // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-grant-api-key.html GrantApiKey security_grant_api_key.NewGrantApiKey // Check user privileges. - // Determines whether the specified user has a specified list of privileges. + // + // Determine whether the specified user has a specified list of privileges. + // All users can use this API, but only to determine their own privileges. + // To check the privileges of other users, you must use the run as feature. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges.html HasPrivileges security_has_privileges.NewHasPrivileges - // Determines whether the users associated with the specified profile IDs have - // all the requested privileges. + // Check user profile privileges. + // + // Determine whether the users associated with the specified user profile IDs + // have all the requested privileges. + // + // NOTE: The user profile feature is designed only for use by Kibana and + // Elastic's Observability, Enterprise Search, and Elastic Security solutions. + // Individual users and external applications should not call this API directly. + // Elastic reserves the right to change or remove this feature in future + // releases without prior notice. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges-user-profile.html HasPrivilegesUserProfile security_has_privileges_user_profile.NewHasPrivilegesUserProfile // Invalidate API keys. - // Invalidates one or more API keys. - // The `manage_api_key` privilege allows deleting any API keys. - // The `manage_own_api_key` only allows deleting API keys that are owned by the - // user. + // + // This API invalidates API keys created by the create API key or grant API key + // APIs. + // Invalidated API keys fail authentication, but they can still be viewed using + // the get API key information and query API key information APIs, for at least + // the configured retention period, until they are automatically deleted. + // + // To use this API, you must have at least the `manage_security`, + // `manage_api_key`, or `manage_own_api_key` cluster privileges. + // The `manage_security` privilege allows deleting any API key, including both + // REST and cross cluster API keys. + // The `manage_api_key` privilege allows deleting any REST API key, but not + // cross cluster API keys. + // The `manage_own_api_key` only allows deleting REST API keys that are owned by + // the user. // In addition, with the `manage_own_api_key` privilege, an invalidation request // must be issued in one of the three formats: + // // - Set the parameter `owner=true`. - // - Or, set both `username` and `realm_name` to match the user’s identity. - // - Or, if the request is issued by an API key, i.e. an API key invalidates - // itself, specify its ID in the `ids` field. + // - Or, set both `username` and `realm_name` to match the user's identity. + // - Or, if the request is issued by an API key, that is to say an API key + // invalidates itself, specify its ID in the `ids` field. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-api-key.html InvalidateApiKey security_invalidate_api_key.NewInvalidateApiKey - // Invalidates one or more access tokens or refresh tokens. + // Invalidate a token. + // + // The access tokens returned by the get token API have a finite period of time + // for which they are valid. + // After that time period, they can no longer be used. + // The time period is defined by the `xpack.security.authc.token.timeout` + // setting. + // + // The refresh tokens returned by the get token API are only valid for 24 hours. + // They can also be used exactly once. + // If you want to invalidate one or more access or refresh tokens immediately, + // use this invalidate token API. + // + // NOTE: While all parameters are optional, at least one of them is required. + // More specifically, either one of `token` or `refresh_token` parameters is + // required. + // If none of these two are specified, then `realm_name` and/or `username` need + // to be specified. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html InvalidateToken security_invalidate_token.NewInvalidateToken - // Exchanges an OpenID Connection authentication response message for an - // Elasticsearch access token and refresh token pair + // Authenticate OpenID Connect. + // + // Exchange an OpenID Connect authentication response message for an + // Elasticsearch internal access token and refresh token that can be + // subsequently used for authentication. + // + // Elasticsearch exposes all the necessary OpenID Connect related functionality + // with the OpenID Connect APIs. + // These APIs are used internally by Kibana in order to provide OpenID Connect + // based authentication, but can also be used by other, custom web applications + // or other clients. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-authenticate.html OidcAuthenticate security_oidc_authenticate.NewOidcAuthenticate - // Invalidates a refresh token and access token that was generated from the - // OpenID Connect Authenticate API + // Logout of OpenID Connect. + // + // Invalidate an access token and a refresh token that were generated as a + // response to the `/_security/oidc/authenticate` API. + // + // If the OpenID Connect authentication realm in Elasticsearch is accordingly + // configured, the response to this call will contain a URI pointing to the end + // session endpoint of the OpenID Connect Provider in order to perform single + // logout. + // + // Elasticsearch exposes all the necessary OpenID Connect related functionality + // with the OpenID Connect APIs. + // These APIs are used internally by Kibana in order to provide OpenID Connect + // based authentication, but can also be used by other, custom web applications + // or other clients. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-logout.html OidcLogout security_oidc_logout.NewOidcLogout - // Creates an OAuth 2.0 authentication request as a URL string + // Prepare OpenID connect authentication. + // + // Create an oAuth 2.0 authentication request as a URL string based on the + // configuration of the OpenID Connect authentication realm in Elasticsearch. + // + // The response of this API is a URL pointing to the Authorization Endpoint of + // the configured OpenID Connect Provider, which can be used to redirect the + // browser of the user in order to continue the authentication process. + // + // Elasticsearch exposes all the necessary OpenID Connect related functionality + // with the OpenID Connect APIs. + // These APIs are used internally by Kibana in order to provide OpenID Connect + // based authentication, but can also be used by other, custom web applications + // or other clients. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-prepare-authentication.html OidcPrepareAuthentication security_oidc_prepare_authentication.NewOidcPrepareAuthentication - // Adds or updates application privileges. + // Create or update application privileges. + // + // To use this API, you must have one of the following privileges: + // + // * The `manage_security` cluster privilege (or a greater privilege such as + // `all`). + // * The "Manage Application Privileges" global privilege for the application + // being referenced in the request. + // + // Application names are formed from a prefix, with an optional suffix that + // conform to the following rules: + // + // * The prefix must begin with a lowercase ASCII letter. + // * The prefix must contain only ASCII letters or digits. + // * The prefix must be at least 3 characters long. + // * If the suffix exists, it must begin with either a dash `-` or `_`. + // * The suffix cannot contain any of the following characters: `\`, `/`, `*`, + // `?`, `"`, `<`, `>`, `|`, `,`, `*`. + // * No part of the name can contain whitespace. + // + // Privilege names must begin with a lowercase ASCII letter and must contain + // only ASCII letters and digits along with the characters `_`, `-`, and `.`. + // + // Action names can contain any number of printable ASCII characters and must + // contain at least one of the following characters: `/`, `*`, `:`. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-privileges.html PutPrivileges security_put_privileges.NewPutPrivileges - // The role management APIs are generally the preferred way to manage roles, - // rather than using file-based role management. + // Create or update roles. + // + // The role management APIs are generally the preferred way to manage roles in + // the native realm, rather than using file-based role management. 
// The create or update roles API cannot update roles that are defined in roles // files. + // File-based role management is not available in Elastic Serverless. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role.html PutRole security_put_role.NewPutRole - // Creates and updates role mappings. + // Create or update role mappings. + // + // Role mappings define which roles are assigned to each user. + // Each mapping has rules that identify users and a list of roles that are + // granted to those users. + // The role mapping APIs are generally the preferred way to manage role mappings + // rather than using role mapping files. The create or update role mappings API + // cannot update role mappings that are defined in role mapping files. + // + // NOTE: This API does not create roles. Rather, it maps users to existing + // roles. + // Roles can be created by using the create or update roles API or roles files. + // + // **Role templates** + // + // The most common use for role mappings is to create a mapping from a known + // value on the user to a fixed role name. + // For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should + // be given the superuser role in Elasticsearch. + // The `roles` field is used for this purpose. + // + // For more complex needs, it is possible to use Mustache templates to + // dynamically determine the names of the roles that should be granted to the + // user. + // The `role_templates` field is used for this purpose. + // + // NOTE: To use role templates successfully, the relevant scripting feature must + // be enabled. + // Otherwise, all attempts to create a role mapping with role templates fail. + // + // All of the user fields that are available in the role mapping rules are also + // available in the role templates. + // Thus it is possible to assign a user to a role that reflects their username, + // their groups, or the name of the realm to which they authenticated. 
+ // + // By default a template is evaluated to produce a single string that is the + // name of the role which should be assigned to the user. + // If the format of the template is set to "json" then the template is expected + // to produce a JSON string or an array of JSON strings for the role names. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html PutRoleMapping security_put_role_mapping.NewPutRoleMapping - // Adds and updates users in the native realm. These users are commonly referred - // to as native users. + // Create or update users. + // + // Add and update users in the native realm. + // A password is required for adding a new user but is optional when updating an + // existing user. + // To change a user's password without updating any other fields, use the change + // password API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html PutUser security_put_user.NewPutUser - // Query API keys. - // Retrieves a paginated list of API keys and their information. You can - // optionally filter the results with a query. + // Find API keys with a query. + // + // Get a paginated list of API keys and their information. + // You can optionally filter the results with a query. + // + // To use this API, you must have at least the `manage_own_api_key` or the + // `read_security` cluster privileges. + // If you have only the `manage_own_api_key` privilege, this API returns only + // the API keys that you own. + // If you have the `read_security`, `manage_api_key`, or greater privileges + // (including `manage_security`), this API returns all API keys regardless of + // ownership. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-api-key.html QueryApiKeys security_query_api_keys.NewQueryApiKeys - // Retrieves roles in a paginated manner. You can optionally filter the results - // with a query. + // Find roles with a query. 
+ // + // Get roles in a paginated manner. + // The role management APIs are generally the preferred way to manage roles, + // rather than using file-based role management. + // The query roles API does not retrieve roles that are defined in roles files, + // nor built-in ones. + // You can optionally filter the results with a query. + // Also, the results can be paginated and sorted. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-role.html QueryRole security_query_role.NewQueryRole - // Retrieves information for Users in a paginated manner. You can optionally - // filter the results with a query. + // Find users with a query. + // + // Get information for users in a paginated manner. + // You can optionally filter the results with a query. + // + // NOTE: As opposed to the get user API, built-in users are excluded from the + // result. + // This API is only for native users. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-user.html QueryUser security_query_user.NewQueryUser - // Submits a SAML Response message to Elasticsearch for consumption. + // Authenticate SAML. + // + // Submit a SAML response message to Elasticsearch for consumption. + // + // NOTE: This API is intended for use by custom web applications other than + // Kibana. + // If you are using Kibana, refer to the documentation for configuring SAML + // single-sign-on on the Elastic Stack. + // + // The SAML message that is submitted can be: + // + // * A response to a SAML authentication request that was previously created + // using the SAML prepare authentication API. + // * An unsolicited SAML message in the case of an IdP-initiated single sign-on + // (SSO) flow. + // + // In either case, the SAML message needs to be a base64 encoded XML document + // with a root element of ``. 
+ // + // After successful validation, Elasticsearch responds with an Elasticsearch + // internal access token and refresh token that can be subsequently used for + // authentication. + // This API endpoint essentially exchanges SAML responses that indicate + // successful authentication in the IdP for Elasticsearch access and refresh + // tokens, which can be used for authentication against Elasticsearch. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-authenticate.html SamlAuthenticate security_saml_authenticate.NewSamlAuthenticate + // Logout of SAML completely. + // // Verifies the logout response sent from the SAML IdP. + // + // NOTE: This API is intended for use by custom web applications other than + // Kibana. + // If you are using Kibana, refer to the documentation for configuring SAML + // single-sign-on on the Elastic Stack. + // + // The SAML IdP may send a logout response back to the SP after handling the + // SP-initiated SAML Single Logout. + // This API verifies the response by ensuring the content is relevant and + // validating its signature. + // An empty response is returned if the verification process is successful. + // The response can be sent by the IdP with either the HTTP-Redirect or the + // HTTP-Post binding. + // The caller of this API must prepare the request accordingly so that this API + // can handle either of them. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-complete-logout.html SamlCompleteLogout security_saml_complete_logout.NewSamlCompleteLogout - // Submits a SAML LogoutRequest message to Elasticsearch for consumption. + // Invalidate SAML. + // + // Submit a SAML LogoutRequest message to Elasticsearch for consumption. + // + // NOTE: This API is intended for use by custom web applications other than + // Kibana. + // If you are using Kibana, refer to the documentation for configuring SAML + // single-sign-on on the Elastic Stack. 
+ // + // The logout request comes from the SAML IdP during an IdP initiated Single + // Logout. + // The custom web application can use this API to have Elasticsearch process the + // `LogoutRequest`. + // After successful validation of the request, Elasticsearch invalidates the + // access token and refresh token that corresponds to that specific SAML + // principal and provides a URL that contains a SAML LogoutResponse message. + // Thus the user can be redirected back to their IdP. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-invalidate.html SamlInvalidate security_saml_invalidate.NewSamlInvalidate + // Logout of SAML. + // // Submits a request to invalidate an access token and refresh token. + // + // NOTE: This API is intended for use by custom web applications other than + // Kibana. + // If you are using Kibana, refer to the documentation for configuring SAML + // single-sign-on on the Elastic Stack. + // + // This API invalidates the tokens that were generated for a user by the SAML + // authenticate API. + // If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP + // supports this, the Elasticsearch response contains a URL to redirect the user + // to the IdP that contains a SAML logout request (starting an SP-initiated SAML + // Single Logout). // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-logout.html SamlLogout security_saml_logout.NewSamlLogout - // Creates a SAML authentication request () as a URL string, based + // Prepare SAML authentication. + // + // Create a SAML authentication request (``) as a URL string based // on the configuration of the respective SAML realm in Elasticsearch. + // + // NOTE: This API is intended for use by custom web applications other than + // Kibana. + // If you are using Kibana, refer to the documentation for configuring SAML + // single-sign-on on the Elastic Stack. 
+ // + // This API returns a URL pointing to the SAML Identity Provider. + // You can use the URL to redirect the browser of the user in order to continue + // the authentication process. + // The URL includes a single parameter named `SAMLRequest`, which contains a + // SAML Authentication request that is deflated and Base64 encoded. + // If the configuration dictates that SAML authentication requests should be + // signed, the URL has two extra parameters named `SigAlg` and `Signature`. + // These parameters contain the algorithm used for the signature and the + // signature value itself. + // It also returns a random string that uniquely identifies this SAML + // Authentication request. + // The caller of this API needs to store this identifier as it needs to be used + // in a following step of the authentication process. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-prepare-authentication.html SamlPrepareAuthentication security_saml_prepare_authentication.NewSamlPrepareAuthentication + // Create SAML service provider metadata. + // // Generate SAML metadata for a SAML 2.0 Service Provider. + // + // The SAML 2.0 specification provides a mechanism for Service Providers to + // describe their capabilities and configuration using a metadata file. + // This API generates Service Provider metadata based on the configuration of a + // SAML realm in Elasticsearch. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-sp-metadata.html SamlServiceProviderMetadata security_saml_service_provider_metadata.NewSamlServiceProviderMetadata + // Suggest a user profile. + // // Get suggestions for user profiles that match specified search criteria. + // + // NOTE: The user profile feature is designed only for use by Kibana and + // Elastic's Observability, Enterprise Search, and Elastic Security solutions. + // Individual users and external applications should not call this API directly. 
+	// Elastic reserves the right to change or remove this feature in future
+	// releases without prior notice.
	// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-suggest-user-profile.html
	SuggestUserProfiles security_suggest_user_profiles.NewSuggestUserProfiles
	// Update an API key.
-	// Updates attributes of an existing API key.
+	//
+	// Update attributes of an existing API key.
+	// This API supports updates to an API key's access scope, expiration, and
+	// metadata.
+	//
+	// To use this API, you must have at least the `manage_own_api_key` cluster
+	// privilege.
	// Users can only update API keys that they created or that were granted to
	// them.
-	// Use this API to update API keys created by the create API Key or grant API
+	// To update another user's API key, use the `run_as` feature to submit a
+	// request on behalf of another user.
+	//
+	// IMPORTANT: It's not possible to use an API key as the authentication
+	// credential for this API. The owner user's credentials are required.
+	//
+	// Use this API to update API keys created by the create API key or grant API
	// Key APIs.
-	// If you need to apply the same update to many API keys, you can use bulk
-	// update API Keys to reduce overhead.
-	// It’s not possible to update expired API keys, or API keys that have been
-	// invalidated by invalidate API Key.
-	// This API supports updates to an API key’s access scope and metadata.
+	// If you need to apply the same update to many API keys, you can use the bulk
+	// update API keys API to reduce overhead.
+	// It's not possible to update expired API keys or API keys that have been
+	// invalidated by the invalidate API key API.
+	//
	// The access scope of an API key is derived from the `role_descriptors` you
-	// specify in the request, and a snapshot of the owner user’s permissions at the
+	// specify in the request and a snapshot of the owner user's permissions at the
	// time of the request.
- // The snapshot of the owner’s permissions is updated automatically on every + // The snapshot of the owner's permissions is updated automatically on every // call. - // If you don’t specify `role_descriptors` in the request, a call to this API - // might still change the API key’s access scope. - // This change can occur if the owner user’s permissions have changed since the - // API key was created or last modified. - // To update another user’s API key, use the `run_as` feature to submit a - // request on behalf of another user. - // IMPORTANT: It’s not possible to use an API key as the authentication - // credential for this API. - // To update an API key, the owner user’s credentials are required. + // + // IMPORTANT: If you don't specify `role_descriptors` in the request, a call to + // this API might still change the API key's access scope. + // This change can occur if the owner user's permissions have changed since the + // API key was created or last modified. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-api-key.html UpdateApiKey security_update_api_key.NewUpdateApiKey - // Update settings for the security system index + // Update a cross-cluster API key. + // + // Update the attributes of an existing cross-cluster API key, which is used for + // API key based remote cluster access. + // + // To use this API, you must have at least the `manage_security` cluster + // privilege. + // Users can only update API keys that they created. + // To update another user's API key, use the `run_as` feature to submit a + // request on behalf of another user. + // + // IMPORTANT: It's not possible to use an API key as the authentication + // credential for this API. + // To update an API key, the owner user's credentials are required. + // + // It's not possible to update expired API keys, or API keys that have been + // invalidated by the invalidate API key API. 
+ // + // This API supports updates to an API key's access scope, metadata, and + // expiration. + // The owner user's information, such as the `username` and `realm`, is also + // updated automatically on every call. + // + // NOTE: This API cannot update REST API keys, which should be updated by either + // the update API key or bulk update API keys API. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-cross-cluster-api-key.html + UpdateCrossClusterApiKey security_update_cross_cluster_api_key.NewUpdateCrossClusterApiKey + // Update security index settings. + // + // Update the user-configurable settings for the security internal index + // (`.security` and associated indices). Only a subset of settings are allowed + // to be modified. This includes `index.auto_expand_replicas` and + // `index.number_of_replicas`. + // + // NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will + // be ignored during updates. + // + // If a specific index is not in use on the system and settings are provided for + // it, the request will be rejected. + // This API does not yet support configuring the settings for indices before + // they are in use. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-settings.html UpdateSettings security_update_settings.NewUpdateSettings - // Updates specific data for the user profile that's associated with the - // specified unique ID. + // Update user profile data. + // + // Update specific data for the user profile that is associated with a unique + // ID. + // + // NOTE: The user profile feature is designed only for use by Kibana and + // Elastic's Observability, Enterprise Search, and Elastic Security solutions. + // Individual users and external applications should not call this API directly. + // Elastic reserves the right to change or remove this feature in future + // releases without prior notice. 
+ // + // To use this API, you must have one of the following privileges: + // + // * The `manage_user_profile` cluster privilege. + // * The `update_profile_data` global privilege for the namespaces that are + // referenced in the request. + // + // This API updates the `labels` and `data` fields of an existing user profile + // document with JSON objects. + // New keys and their values are added to the profile document and conflicting + // keys are replaced by data that's included in the request. + // + // For both labels and data, content is namespaced by the top-level fields. + // The `update_profile_data` global privilege grants privileges for updating + // only the allowed namespaces. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-user-profile-data.html UpdateUserProfileData security_update_user_profile_data.NewUpdateUserProfileData } type Shutdown struct { - // Removes a node from the shutdown list. Designed for indirect use by ECE/ESS - // and ECK. Direct use is not supported. - // https://www.elastic.co/guide/en/elasticsearch/reference/current + // Cancel node shutdown preparations. + // Remove a node from the shutdown list so it can resume normal operations. + // You must explicitly clear the shutdown request when a node rejoins the + // cluster or when a node has permanently left the cluster. + // Shutdown requests are never removed automatically by Elasticsearch. + // + // NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + // Cloud Enterprise, and Elastic Cloud on Kubernetes. + // Direct use is not supported. + // + // If the operator privileges feature is enabled, you must be an operator to use + // this API. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-shutdown.html DeleteNode shutdown_delete_node.NewDeleteNode - // Retrieve status of a node or nodes that are currently marked as shutting - // down. Designed for indirect use by ECE/ESS and ECK. 
Direct use is not + // Get the shutdown status. + // + // Get information about nodes that are ready to be shut down, have shut down + // preparations still in progress, or have stalled. + // The API returns status information for each part of the shut down process. + // + // NOTE: This feature is designed for indirect use by Elasticsearch Service, + // Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. - // https://www.elastic.co/guide/en/elasticsearch/reference/current + // + // If the operator privileges feature is enabled, you must be an operator to use + // this API. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-shutdown.html GetNode shutdown_get_node.NewGetNode - // Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. - // Direct use is not supported. - // https://www.elastic.co/guide/en/elasticsearch/reference/current + // Prepare a node to be shut down. + // + // NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + // Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + // supported. + // + // If you specify a node that is offline, it will be prepared for shut down when + // it rejoins the cluster. + // + // If the operator privileges feature is enabled, you must be an operator to use + // this API. + // + // The API migrates ongoing tasks and index shards to other nodes as needed to + // prepare a node to be restarted or shut down and removed from the cluster. + // This ensures that Elasticsearch can be stopped safely with minimal disruption + // to the cluster. + // + // You must specify the type of shutdown: `restart`, `remove`, or `replace`. + // If a node is already being prepared for shutdown, you can use this API to + // change the shutdown type. + // + // IMPORTANT: This API does NOT terminate the Elasticsearch process. + // Monitor the node shutdown status to determine when it is safe to stop + // Elasticsearch. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-shutdown.html PutNode shutdown_put_node.NewPutNode } +type Simulate struct { + // Simulate data ingestion. + // Run ingest pipelines against a set of provided documents, optionally with + // substitute pipeline definitions, to simulate ingesting data into an index. + // + // This API is meant to be used for troubleshooting or pipeline development, as + // it does not actually index any data into Elasticsearch. + // + // The API runs the default and final pipeline for that index against a set of + // documents provided in the body of the request. + // If a pipeline contains a reroute processor, it follows that reroute processor + // to the new index, running that index's pipelines as well the same way that a + // non-simulated ingest would. + // No data is indexed into Elasticsearch. + // Instead, the transformed document is returned, along with the list of + // pipelines that have been run and the name of the index where the document + // would have been indexed if this were not a simulation. + // The transformed document is validated against the mappings that would apply + // to this index, and any validation error is reported in the result. + // + // This API differs from the simulate pipeline API in that you specify a single + // pipeline for that API, and it runs only that one pipeline. + // The simulate pipeline API is more useful for developing a single pipeline, + // while the simulate ingest API is more useful for troubleshooting the + // interaction of the various pipelines that get applied when ingesting into an + // index. + // + // By default, the pipeline definitions that are currently in the system are + // used. + // However, you can supply substitute pipeline definitions in the body of the + // request. + // These will be used in place of the pipeline definitions that are already in + // the system. 
This can be used to replace existing pipeline definitions or to + // create new ones. The pipeline substitutions are used only within this + // request. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/simulate-ingest-api.html + Ingest simulate_ingest.NewIngest +} + type Slm struct { - // Deletes an existing snapshot lifecycle policy. + // Delete a policy. + // Delete a snapshot lifecycle policy definition. + // This operation prevents any future snapshots from being taken but does not + // cancel in-progress snapshots or remove previously-taken snapshots. // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-delete-policy.html DeleteLifecycle slm_delete_lifecycle.NewDeleteLifecycle - // Immediately creates a snapshot according to the lifecycle policy, without - // waiting for the scheduled time. + // Run a policy. + // Immediately create a snapshot according to the snapshot lifecycle policy + // without waiting for the scheduled time. + // The snapshot policy is normally applied according to its schedule, but you + // might want to manually run a policy before performing an upgrade or other + // maintenance. // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-lifecycle.html ExecuteLifecycle slm_execute_lifecycle.NewExecuteLifecycle - // Deletes any snapshots that are expired according to the policy's retention - // rules. + // Run a retention policy. + // Manually apply the retention policy to force immediate removal of snapshots + // that are expired according to the snapshot lifecycle policy retention rules. + // The retention policy is normally applied according to its schedule. // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-retention.html ExecuteRetention slm_execute_retention.NewExecuteRetention - // Retrieves one or more snapshot lifecycle policy definitions and information - // about the latest snapshot attempts. + // Get policy information. 
+ // Get snapshot lifecycle policy definitions and information about the latest + // snapshot attempts. // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-policy.html GetLifecycle slm_get_lifecycle.NewGetLifecycle - // Returns global and policy-level statistics about actions taken by snapshot + // Get snapshot lifecycle management statistics. + // Get global and policy-level statistics about actions taken by snapshot // lifecycle management. // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-stats.html GetStats slm_get_stats.NewGetStats - // Retrieves the status of snapshot lifecycle management (SLM). + // Get the snapshot lifecycle management status. // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-status.html GetStatus slm_get_status.NewGetStatus - // Creates or updates a snapshot lifecycle policy. + // Create or update a policy. + // Create or update a snapshot lifecycle policy. + // If the policy already exists, this request increments the policy version. + // Only the latest version of a policy is stored. // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-put-policy.html PutLifecycle slm_put_lifecycle.NewPutLifecycle - // Turns on snapshot lifecycle management (SLM). + // Start snapshot lifecycle management. + // Snapshot lifecycle management (SLM) starts automatically when a cluster is + // formed. + // Manually starting SLM is necessary only if it has been stopped using the stop + // SLM API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-start.html Start slm_start.NewStart - // Turns off snapshot lifecycle management (SLM). + // Stop snapshot lifecycle management. + // Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. + // This API is useful when you are performing maintenance on a cluster and need + // to prevent SLM from performing any actions on your data streams or indices. 
+ // Stopping SLM does not stop any snapshots that are in progress.
+ // You can manually trigger snapshots with the run snapshot lifecycle policy API
+ // even if SLM is stopped.
+ //
+ // The API returns a response as soon as the request is acknowledged, but the
+ // plugin might continue to run until in-progress operations complete and it can
+ // be safely stopped.
+ // Use the get snapshot lifecycle management status API to see if SLM is
+ // running.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-stop.html
 Stop slm_stop.NewStop
}

type Snapshot struct {
- // Triggers the review of a snapshot repository’s contents and deletes any stale
- // data not referenced by existing snapshots.
+ // Clean up the snapshot repository.
+ // Trigger the review of the contents of a snapshot repository and delete any
+ // stale data not referenced by existing snapshots.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/clean-up-snapshot-repo-api.html
 CleanupRepository snapshot_cleanup_repository.NewCleanupRepository
- // Clones indices from one snapshot into another snapshot in the same
- // repository.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html
+ // Clone a snapshot.
+ // Clone part or all of a snapshot into another snapshot in the same repository.
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/clone-snapshot-api.html
 Clone snapshot_clone.NewClone
- // Creates a snapshot in a repository.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html
+ // Create a snapshot.
+ // Take a snapshot of a cluster or of data streams and indices.
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/create-snapshot-api.html
 Create snapshot_create.NewCreate
- // Creates a repository.
+ // Create or update a snapshot repository.
+ // IMPORTANT: If you are migrating searchable snapshots, the repository name
+ // must be identical in the source and destination clusters.
+ // To register a snapshot repository, the cluster's global metadata must be
+ // writeable.
+ // Ensure there are no cluster blocks (for example, `cluster.blocks.read_only`
+ // and `cluster.blocks.read_only_allow_delete` settings) that prevent write
+ // access.
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html
 CreateRepository snapshot_create_repository.NewCreateRepository
- // Deletes one or more snapshots.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html
+ // Delete snapshots.
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-snapshot-api.html
 Delete snapshot_delete.NewDelete
- // Deletes a repository.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html
+ // Delete snapshot repositories.
+ // When a repository is unregistered, Elasticsearch removes only the reference
+ // to the location where the repository is storing the snapshots.
+ // The snapshots themselves are left untouched and in place.
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-snapshot-repo-api.html
 DeleteRepository snapshot_delete_repository.NewDeleteRepository
- // Returns information about a snapshot.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html
+ // Get snapshot information.
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-snapshot-api.html
 Get snapshot_get.NewGet
- // Returns information about a repository.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html
+ // Get snapshot repository information.
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-snapshot-repo-api.html GetRepository snapshot_get_repository.NewGetRepository - // Restores a snapshot. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Analyze a snapshot repository. + // Analyze the performance characteristics and any incorrect behaviour found in + // a repository. + // + // The response exposes implementation details of the analysis which may change + // from version to version. + // The response body format is therefore not considered stable and may be + // different in newer versions. + // + // There are a large number of third-party storage systems available, not all of + // which are suitable for use as a snapshot repository by Elasticsearch. + // Some storage systems behave incorrectly, or perform poorly, especially when + // accessed concurrently by multiple clients as the nodes of an Elasticsearch + // cluster do. This API performs a collection of read and write operations on + // your repository which are designed to detect incorrect behaviour and to + // measure the performance characteristics of your storage system. + // + // The default values for the parameters are deliberately low to reduce the + // impact of running an analysis inadvertently and to provide a sensible + // starting point for your investigations. + // Run your first analysis with the default parameter values to check for simple + // problems. + // If successful, run a sequence of increasingly large analyses until you + // encounter a failure or you reach a `blob_count` of at least `2000`, a + // `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, + // and a `register_operation_count` of at least `100`. + // Always specify a generous timeout, possibly `1h` or longer, to allow time for + // each analysis to run to completion. 
+ // Perform the analyses using a multi-node cluster of a similar size to your + // production cluster so that it can detect any problems that only arise when + // the repository is accessed by many nodes at once. + // + // If the analysis fails, Elasticsearch detected that your repository behaved + // unexpectedly. + // This usually means you are using a third-party storage system with an + // incorrect or incompatible implementation of the API it claims to support. + // If so, this storage system is not suitable for use as a snapshot repository. + // You will need to work with the supplier of your storage system to address the + // incompatibilities that Elasticsearch detects. + // + // If the analysis is successful, the API returns details of the testing + // process, optionally including how long each operation took. + // You can use this information to determine the performance of your storage + // system. + // If any operation fails or returns an incorrect result, the API returns an + // error. + // If the API returns an error, it may not have removed all the data it wrote to + // the repository. + // The error will indicate the location of any leftover data and this path is + // also recorded in the Elasticsearch logs. + // You should verify that this location has been cleaned up correctly. + // If there is still leftover data at the specified location, you should + // manually remove it. + // + // If the connection from your client to Elasticsearch is closed while the + // client is waiting for the result of the analysis, the test is cancelled. + // Some clients are configured to close their connection if no response is + // received within a certain timeout. + // An analysis takes a long time to complete so you might need to relax any such + // client-side timeouts. + // On cancellation the analysis attempts to clean up the data it was writing, + // but it may not be able to remove it all. 
+ // The path to the leftover data is recorded in the Elasticsearch logs. + // You should verify that this location has been cleaned up correctly. + // If there is still leftover data at the specified location, you should + // manually remove it. + // + // If the analysis is successful then it detected no incorrect behaviour, but + // this does not mean that correct behaviour is guaranteed. + // The analysis attempts to detect common bugs but it does not offer 100% + // coverage. + // Additionally, it does not test the following: + // + // * Your repository must perform durable writes. Once a blob has been written + // it must remain in place until it is deleted, even after a power loss or + // similar disaster. + // * Your repository must not suffer from silent data corruption. Once a blob + // has been written, its contents must remain unchanged until it is deliberately + // modified or deleted. + // * Your repository must behave correctly even if connectivity from the cluster + // is disrupted. Reads and writes may fail in this case, but they must not + // return incorrect results. + // + // IMPORTANT: An analysis writes a substantial amount of data to your repository + // and then reads it back again. + // This consumes bandwidth on the network between the cluster and the + // repository, and storage space and I/O bandwidth on the repository itself. + // You must ensure this load does not affect other users of these systems. + // Analyses respect the repository settings `max_snapshot_bytes_per_sec` and + // `max_restore_bytes_per_sec` if available and the cluster setting + // `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth + // they consume. + // + // NOTE: This API is intended for exploratory use by humans. You should expect + // the request parameters and the response format to vary in future versions. 
+ // + // NOTE: Different versions of Elasticsearch may perform different checks for + // repository compatibility, with newer versions typically being stricter than + // older ones. + // A storage system that passes repository analysis with one version of + // Elasticsearch may fail with a different version. + // This indicates it behaves incorrectly in ways that the former version did not + // detect. + // You must work with the supplier of your storage system to address the + // incompatibilities detected by the repository analysis API in any version of + // Elasticsearch. + // + // NOTE: This API may not work correctly in a mixed-version cluster. + // + // *Implementation details* + // + // NOTE: This section of documentation describes how the repository analysis API + // works in this version of Elasticsearch, but you should expect the + // implementation to vary between versions. The request parameters and response + // format depend on details of the implementation so may also be different in + // newer versions. + // + // The analysis comprises a number of blob-level tasks, as set by the + // `blob_count` parameter and a number of compare-and-exchange operations on + // linearizable registers, as set by the `register_operation_count` parameter. + // These tasks are distributed over the data and master-eligible nodes in the + // cluster for execution. + // + // For most blob-level tasks, the executing node first writes a blob to the + // repository and then instructs some of the other nodes in the cluster to + // attempt to read the data it just wrote. + // The size of the blob is chosen randomly, according to the `max_blob_size` and + // `max_total_data_size` parameters. + // If any of these reads fails then the repository does not implement the + // necessary read-after-write semantics that Elasticsearch requires. 
+ // + // For some blob-level tasks, the executing node will instruct some of its peers + // to attempt to read the data before the writing process completes. + // These reads are permitted to fail, but must not return partial data. + // If any read returns partial data then the repository does not implement the + // necessary atomicity semantics that Elasticsearch requires. + // + // For some blob-level tasks, the executing node will overwrite the blob while + // its peers are reading it. + // In this case the data read may come from either the original or the + // overwritten blob, but the read operation must not return partial data or a + // mix of data from the two blobs. + // If any of these reads returns partial data or a mix of the two blobs then the + // repository does not implement the necessary atomicity semantics that + // Elasticsearch requires for overwrites. + // + // The executing node will use a variety of different methods to write the blob. + // For instance, where applicable, it will use both single-part and multi-part + // uploads. + // Similarly, the reading nodes will use a variety of different methods to read + // the data back again. + // For instance they may read the entire blob from start to end or may read only + // a subset of the data. + // + // For some blob-level tasks, the executing node will cancel the write before it + // is complete. + // In this case, it still instructs some of the other nodes in the cluster to + // attempt to read the blob but all of these reads must fail to find the blob. + // + // Linearizable registers are special blobs that Elasticsearch manipulates using + // an atomic compare-and-exchange operation. + // This operation ensures correct and strongly-consistent behavior even when the + // blob is accessed by multiple nodes at the same time. + // The detailed implementation of the compare-and-exchange operation on + // linearizable registers varies by repository type. 
+ // Repository analysis verifies that uncontended compare-and-exchange
+ // operations on a linearizable register blob always succeed.
+ // Repository analysis also verifies that contended operations either succeed or
+ // report the contention but do not return incorrect results.
+ // If an operation fails due to contention, Elasticsearch retries the operation
+ // until it succeeds.
+ // Most of the compare-and-exchange operations performed by repository analysis
+ // atomically increment a counter which is represented as an 8-byte blob.
+ // Some operations also verify the behavior on small blobs with sizes other than
+ // 8 bytes.
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/repo-analysis-api.html
+ RepositoryAnalyze snapshot_repository_analyze.NewRepositoryAnalyze
+ // Verify the repository integrity.
+ // Verify the integrity of the contents of a snapshot repository.
+ //
+ // This API enables you to perform a comprehensive check of the contents of a
+ // repository, looking for any anomalies in its data or metadata which might
+ // prevent you from restoring snapshots from the repository or which might cause
+ // future snapshot create or delete operations to fail.
+ //
+ // If you suspect the integrity of the contents of one of your snapshot
+ // repositories, cease all write activity to this repository immediately, set
+ // its `read_only` option to `true`, and use this API to verify its integrity.
+ // Until you do so:
+ //
+ // * It may not be possible to restore some snapshots from this repository.
+ // * Searchable snapshots may report errors when searched or may have unassigned
+ // shards.
+ // * Taking snapshots into this repository may fail or may appear to succeed but
+ // have created a snapshot which cannot be restored.
+ // * Deleting snapshots from this repository may fail or may appear to succeed
+ // but leave the underlying data on disk.
+ // * Continuing to write to the repository while it is in an invalid state may
+ // cause additional damage to its contents.
+ //
+ // If the API finds any problems with the integrity of the contents of your
+ // repository, Elasticsearch will not be able to repair the damage.
+ // The only way to bring the repository back into a fully working state after
+ // its contents have been damaged is by restoring its contents from a repository
+ // backup which was taken before the damage occurred.
+ // You must also identify what caused the damage and take action to prevent it
+ // from happening again.
+ //
+ // If you cannot restore a repository backup, register a new repository and use
+ // this for all future snapshot operations.
+ // In some cases it may be possible to recover some of the contents of a damaged
+ // repository, either by restoring as many of its snapshots as needed and taking
+ // new snapshots of the restored data, or by using the reindex API to copy data
+ // from any searchable snapshots mounted from the damaged repository.
+ //
+ // Avoid all operations which write to the repository while the verify
+ // repository integrity API is running.
+ // If something changes the repository contents while an integrity verification
+ // is running then Elasticsearch may incorrectly report having detected some
+ // anomalies in its contents due to the concurrent writes.
+ // It may also incorrectly fail to report some anomalies that the concurrent
+ // writes prevented it from detecting.
+ //
+ // NOTE: This API is intended for exploratory use by humans. You should expect
+ // the request parameters and the response format to vary in future versions.
+ //
+ // NOTE: This API may not work correctly in a mixed-version cluster.
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/verify-repo-integrity-api.html
+ RepositoryVerifyIntegrity snapshot_repository_verify_integrity.NewRepositoryVerifyIntegrity
+ // Restore a snapshot.
+ // Restore a snapshot of a cluster or data streams and indices.
+ //
+ // You can restore a snapshot only to a running cluster with an elected master
+ // node.
+ // The snapshot repository must be registered and available to the cluster.
+ // The snapshot and cluster versions must be compatible.
+ //
+ // To restore a snapshot, the cluster's global metadata must be writable. Ensure
+ // there aren't any cluster blocks that prevent writes. The restore operation
+ // ignores index blocks.
+ //
+ // Before you restore a data stream, ensure the cluster contains a matching
+ // index template with data streams enabled. To check, use the index management
+ // feature in Kibana or the get index template API:
+ //
+ // ```
+ // GET
+ // _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+ // ```
+ //
+ // If no such template exists, you can create one or restore a cluster state
+ // that contains one. Without a matching index template, a data stream can't
+ // roll over or create backing indices.
+ //
+ // If your snapshot contains data from App Search or Workplace Search, you must
+ // restore the Enterprise Search encryption key before you restore the snapshot.
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/restore-snapshot-api.html
 Restore snapshot_restore.NewRestore
- // Returns information about the status of a snapshot.
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html
+ // Get the snapshot status.
+ // Get a detailed description of the current state for each shard participating
+ // in the snapshot.
+ // Note that this API should be used only to obtain detailed shard-level
+ // information for ongoing snapshots.
+ // If this detail is not needed or you want to obtain information about one or
+ // more existing snapshots, use the get snapshot API.
+ // + // WARNING: Using the API to return the status of any snapshots other than + // currently running snapshots can be expensive. + // The API requires a read from the repository for each shard in each snapshot. + // For example, if you have 100 snapshots with 1,000 shards each, an API request + // that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 + // shards). + // + // Depending on the latency of your storage, such requests can take an extremely + // long time to return results. + // These requests can also tax machine resources and, when using cloud storage, + // incur high processing costs. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-snapshot-status-api.html Status snapshot_status.NewStatus - // Verifies a repository. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html + // Verify a snapshot repository. + // Check for common misconfigurations in a snapshot repository. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/verify-snapshot-repo-api.html VerifyRepository snapshot_verify_repository.NewVerifyRepository } type Sql struct { - // Clears the SQL cursor + // Clear an SQL search cursor. // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-sql-cursor-api.html ClearCursor sql_clear_cursor.NewClearCursor - // Deletes an async SQL search or a stored synchronous SQL search. If the search - // is still running, the API cancels it. + // Delete an async SQL search. + // Delete an async SQL search or a stored synchronous SQL search. + // If the search is still running, the API cancels it. + // + // If the Elasticsearch security features are enabled, only the following users + // can use this API to delete a search: + // + // * Users with the `cancel_task` cluster privilege. + // * The user who first submitted the search. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-async-sql-search-api.html DeleteAsync sql_delete_async.NewDeleteAsync - // Returns the current status and available results for an async SQL search or - // stored synchronous SQL search + // Get async SQL search results. + // Get the current status and available results for an async SQL search or + // stored synchronous SQL search. + // + // If the Elasticsearch security features are enabled, only the user who first + // submitted the SQL search can retrieve the search using this API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-api.html GetAsync sql_get_async.NewGetAsync - // Returns the current status of an async SQL search or a stored synchronous SQL - // search + // Get the async SQL search status. + // Get the current status of an async SQL search or a stored synchronous SQL + // search. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-status-api.html GetAsyncStatus sql_get_async_status.NewGetAsyncStatus - // Executes a SQL request + // Get SQL search results. + // Run an SQL request. // https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-search-api.html Query sql_query.NewQuery - // Translates SQL into Elasticsearch queries + // Translate SQL into Elasticsearch queries. + // Translate an SQL search into a search API request containing Query DSL. + // It accepts the same request body parameters as the SQL search API, excluding + // `cursor`. // https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-translate-api.html Translate sql_translate.NewTranslate } type Ssl struct { - // Retrieves information about the X.509 certificates used to encrypt + // Get SSL certificates. + // + // Get information about the X.509 certificates that are used to encrypt // communications in the cluster. 
+ // The API returns a list that includes certificates from all TLS contexts + // including: + // + // - Settings for transport and HTTP interfaces + // - TLS settings that are used within authentication realms + // - TLS settings for remote monitoring exporters + // + // The list includes certificates that are used for configuring trust, such as + // those configured in the `xpack.security.transport.ssl.truststore` and + // `xpack.security.transport.ssl.certificate_authorities` settings. + // It also includes certificates that are used for configuring server identity, + // such as `xpack.security.http.ssl.keystore` and + // `xpack.security.http.ssl.certificate` settings. + // + // The list does not include certificates that are sourced from the default SSL + // context of the Java Runtime Environment (JRE), even if those certificates are + // in use within Elasticsearch. + // + // NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the + // API returns all the certificates that are included in the PKCS#11 token + // irrespective of whether these are used in the Elasticsearch TLS + // configuration. + // + // If Elasticsearch is configured to use a keystore or truststore, the API + // output includes all certificates in that store, even though some of the + // certificates might not be in active use within the cluster. // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ssl.html Certificates ssl_certificates.NewCertificates } type Synonyms struct { - // Deletes a synonym set + // Delete a synonym set. + // + // You can only delete a synonyms set that is not in use by any index analyzer. + // + // Synonyms sets can be used in synonym graph token filters and synonym token + // filters. + // These synonym filters can be used as part of search analyzers. + // + // Analyzers need to be loaded when an index is restored (such as when a node + // starts, or the index becomes open).
+ // Even if the analyzer is not used on any field mapping, it still needs to be + // loaded on the index recovery phase. + // + // If any analyzers cannot be loaded, the index becomes unavailable and the + // cluster status becomes red or yellow as index shards are not available. + // To prevent that, synonyms sets that are used in analyzers can't be deleted. + // A delete request in this case will return a 400 response code. + // + // To remove a synonyms set, you must first remove all indices that contain + // analyzers using it. + // You can migrate an index by creating a new index that does not contain the + // token filter with the synonyms set, and use the reindex API in order to copy + // over the index data. + // Once finished, you can delete the index. + // When the synonyms set is not used in analyzers, you will be able to delete + // it. // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-synonyms-set.html DeleteSynonym synonyms_delete_synonym.NewDeleteSynonym - // Deletes a synonym rule in a synonym set + // Delete a synonym rule. + // Delete a synonym rule from a synonym set. // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-synonym-rule.html DeleteSynonymRule synonyms_delete_synonym_rule.NewDeleteSynonymRule - // Retrieves a synonym set + // Get a synonym set. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-synonyms-set.html GetSynonym synonyms_get_synonym.NewGetSynonym - // Retrieves a synonym rule from a synonym set + // Get a synonym rule. + // Get a synonym rule from a synonym set. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-synonym-rule.html GetSynonymRule synonyms_get_synonym_rule.NewGetSynonymRule - // Retrieves a summary of all defined synonym sets - // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-synonyms-sets.html + // Get all synonym sets. + // Get a summary of all defined synonym sets. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-synonyms-set.html GetSynonymsSets synonyms_get_synonyms_sets.NewGetSynonymsSets - // Creates or updates a synonym set. + // Create or update a synonym set. + // Synonyms sets are limited to a maximum of 10,000 synonym rules per set. + // If you need to manage more synonym rules, you can create multiple synonym + // sets. + // + // When an existing synonyms set is updated, the search analyzers that use the + // synonyms set are reloaded automatically for all indices. + // This is equivalent to invoking the reload search analyzers API for all + // indices that use the synonyms set. // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-synonyms-set.html PutSynonym synonyms_put_synonym.NewPutSynonym - // Creates or updates a synonym rule in a synonym set + // Create or update a synonym rule. + // Create or update a synonym rule in a synonym set. + // + // If any of the synonym rules included is invalid, the API returns an error. + // + // When you update a synonym rule, all analyzers using the synonyms set will be + // reloaded automatically to reflect the new rule. // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-synonym-rule.html PutSynonymRule synonyms_put_synonym_rule.NewPutSynonymRule } type Tasks struct { - // Cancels a task, if it can be cancelled through an API. + // Cancel a task. + // + // WARNING: The task management API is new and should still be considered a beta + // feature. + // The API may change in ways that are not backwards compatible. + // + // A task may continue to run for some time after it has been cancelled because + // it may not be able to safely stop its current activity straight away. + // It is also possible that Elasticsearch must complete its work on other tasks + // before it can process the cancellation. + // The get task information API will continue to list these cancelled tasks + // until they complete. 
+ // The cancelled flag in the response indicates that the cancellation command + // has been processed and the task will stop as soon as possible. + // + // To troubleshoot why a cancelled task does not complete promptly, use the get + // task information API with the `?detailed` parameter to identify the other + // tasks the system is running. + // You can also use the node hot threads API to obtain detailed information + // about the work the system is doing instead of completing the cancelled task. // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html Cancel tasks_cancel.NewCancel // Get task information. - // Returns information about the tasks currently executing in the cluster. + // Get information about a task currently running in the cluster. + // + // WARNING: The task management API is new and should still be considered a beta + // feature. + // The API may change in ways that are not backwards compatible. + // + // If the task identifier is not found, a 404 response code indicates that there + // are no resources that match the request. // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html Get tasks_get.NewGet - // The task management API returns information about tasks currently executing - // on one or more nodes in the cluster. + // Get all tasks. + // Get information about the tasks currently running on one or more nodes in the + // cluster. + // + // WARNING: The task management API is new and should still be considered a beta + // feature. + // The API may change in ways that are not backwards compatible. + // + // **Identifying running tasks** + // + // The `X-Opaque-Id` header, when provided on the HTTP request header, is going + // to be returned as a header in the response as well as in the headers field + // in the task information. + // This enables you to track certain calls or associate certain tasks with the + // client that started them.
+ // For example: + // + // ``` + // curl -i -H "X-Opaque-Id: 123456" + // "http://localhost:9200/_tasks?group_by=parents" + // ``` + // + // The API returns the following result: + // + // ``` + // HTTP/1.1 200 OK + // X-Opaque-Id: 123456 + // content-type: application/json; charset=UTF-8 + // content-length: 831 + // + // { + // "tasks" : { + // "u5lcZHqcQhu-rUoFaqDphA:45" : { + // "node" : "u5lcZHqcQhu-rUoFaqDphA", + // "id" : 45, + // "type" : "transport", + // "action" : "cluster:monitor/tasks/lists", + // "start_time_in_millis" : 1513823752749, + // "running_time_in_nanos" : 293139, + // "cancellable" : false, + // "headers" : { + // "X-Opaque-Id" : "123456" + // }, + // "children" : [ + // { + // "node" : "u5lcZHqcQhu-rUoFaqDphA", + // "id" : 46, + // "type" : "direct", + // "action" : "cluster:monitor/tasks/lists[n]", + // "start_time_in_millis" : 1513823752750, + // "running_time_in_nanos" : 92133, + // "cancellable" : false, + // "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", + // "headers" : { + // "X-Opaque-Id" : "123456" + // } + // } + // ] + // } + // } + // } + // ``` + // In this example, `X-Opaque-Id: 123456` is the ID as a part of the response + // header. + // The `X-Opaque-Id` in the task `headers` is the ID for the task that was + // initiated by the REST request. + // The `X-Opaque-Id` in the children `headers` is the child task of the task + // that was initiated by the REST request. // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html List tasks_list.NewList } type TextStructure struct { - // Finds the structure of a text field in an index. + // Find the structure of a text field. + // Find the structure of a text field in an Elasticsearch index. + // + // This API provides a starting point for extracting further information from + // log messages already ingested into Elasticsearch. 
+ // For example, if you have ingested data into a very simple index that has just + // `@timestamp` and message fields, you can use this API to see what common + // structure exists in the message field. + // + // The response from the API contains: + // + // * Sample messages. + // * Statistics that reveal the most common values for all fields detected + // within the text and basic numeric statistics for numeric fields. + // * Information about the structure of the text, which is useful when you write + // ingest configurations to index it or similarly formatted text. + // * Appropriate mappings for an Elasticsearch index, which you could use to + // ingest the text. + // + // All this information can be calculated by the structure finder with no + // guidance. + // However, you can optionally override some of the decisions about the text + // structure by specifying one or more query parameters. + // + // If the structure finder produces unexpected results, specify the `explain` + // query parameter and an explanation will appear in the response. + // It helps determine why the returned structure was chosen. // https://www.elastic.co/guide/en/elasticsearch/reference/current/find-field-structure.html FindFieldStructure text_structure_find_field_structure.NewFindFieldStructure - // Finds the structure of a list of messages. The messages must contain data - // that is suitable to be ingested into Elasticsearch. + // Find the structure of text messages. + // Find the structure of a list of text messages. + // The messages must contain data that is suitable to be ingested into + // Elasticsearch. + // + // This API provides a starting point for ingesting data into Elasticsearch in a + // format that is suitable for subsequent use with other Elastic Stack + // functionality. + // Use this API rather than the find text structure API if your input text has + // already been split up into separate messages by some other process. 
+ // + // The response from the API contains: + // + // * Sample messages. + // * Statistics that reveal the most common values for all fields detected + // within the text and basic numeric statistics for numeric fields. + // * Information about the structure of the text, which is useful when you write + // ingest configurations to index it or similarly formatted text. + // * Appropriate mappings for an Elasticsearch index, which you could use to + // ingest the text. + // + // All this information can be calculated by the structure finder with no + // guidance. + // However, you can optionally override some of the decisions about the text + // structure by specifying one or more query parameters. + // + // If the structure finder produces unexpected results, specify the `explain` + // query parameter and an explanation will appear in the response. + // It helps determine why the returned structure was chosen. // https://www.elastic.co/guide/en/elasticsearch/reference/current/find-message-structure.html FindMessageStructure text_structure_find_message_structure.NewFindMessageStructure - // Finds the structure of a text file. The text file must contain data that is - // suitable to be ingested into Elasticsearch. + // Find the structure of a text file. + // The text file must contain data that is suitable to be ingested into + // Elasticsearch. + // + // This API provides a starting point for ingesting data into Elasticsearch in a + // format that is suitable for subsequent use with other Elastic Stack + // functionality. + // Unlike other Elasticsearch endpoints, the data that is posted to this + // endpoint does not need to be UTF-8 encoded and in JSON format. + // It must, however, be text; binary text formats are not currently supported. + // The size is limited to the Elasticsearch HTTP receive buffer size, which + // defaults to 100 Mb. + // + // The response from the API contains: + // + // * A couple of messages from the beginning of the text.
+ // * Statistics that reveal the most common values for all fields detected + // within the text and basic numeric statistics for numeric fields. + // * Information about the structure of the text, which is useful when you write + // ingest configurations to index it or similarly formatted text. + // * Appropriate mappings for an Elasticsearch index, which you could use to + // ingest the text. + // + // All this information can be calculated by the structure finder with no + // guidance. + // However, you can optionally override some of the decisions about the text + // structure by specifying one or more query parameters. // https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html FindStructure text_structure_find_structure.NewFindStructure - // Tests a Grok pattern on some text. + // Test a Grok pattern. + // Test a Grok pattern on one or more lines of text. + // The API indicates whether the lines match the pattern together with the + // offsets and lengths of the matched substrings. // https://www.elastic.co/guide/en/elasticsearch/reference/current/test-grok-pattern.html TestGrokPattern text_structure_test_grok_pattern.NewTestGrokPattern } type Transform struct { // Delete a transform. - // Deletes a transform. // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html DeleteTransform transform_delete_transform.NewDeleteTransform // Retrieves transform usage information for transform nodes. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-node-stats.html GetNodeStats transform_get_node_stats.NewGetNodeStats // Get transforms. - // Retrieves configuration information for transforms. + // Get configuration information for transforms. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html GetTransform transform_get_transform.NewGetTransform // Get transform stats. - // Retrieves usage information for transforms. 
+ // + // Get usage information for transforms. // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html GetTransformStats transform_get_transform_stats.NewGetTransformStats // Preview a transform. @@ -2964,25 +7981,23 @@ type Transform struct { // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html PutTransform transform_put_transform.NewPutTransform // Reset a transform. - // Resets a transform. + // // Before you can reset it, you must stop it; alternatively, use the `force` // query parameter. // If the destination index was created by the transform, it is deleted. // https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-transform.html ResetTransform transform_reset_transform.NewResetTransform // Schedule a transform to start now. - // Instantly runs a transform to process data. // - // If you _schedule_now a transform, it will process the new data instantly, - // without waiting for the configured frequency interval. After _schedule_now - // API is called, - // the transform will be processed again at now + frequency unless _schedule_now - // API + // Instantly run a transform to process data. + // If you run this API, the transform will process the new data instantly, + // without waiting for the configured frequency interval. After the API is + // called, + // the transform will be processed again at `now + frequency` unless the API // is called again in the meantime. // https://www.elastic.co/guide/en/elasticsearch/reference/current/schedule-now-transform.html ScheduleNowTransform transform_schedule_now_transform.NewScheduleNowTransform // Start a transform. - // Starts a transform. // // When you start a transform, it creates the destination index if it does not // already exist. The `number_of_shards` is @@ -3030,75 +8045,174 @@ type Transform struct { // time of update and runs with those privileges. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform.html UpdateTransform transform_update_transform.NewUpdateTransform - // Upgrades all transforms. + // Upgrade all transforms. + // + // Transforms are compatible across minor versions and between supported major + // versions. + // However, over time, the format of transform configuration information may + // change. // This API identifies transforms that have a legacy configuration format and - // upgrades them to the latest version. It - // also cleans up the internal data structures that store the transform state - // and checkpoints. The upgrade does not - // affect the source and destination indices. The upgrade also does not affect - // the roles that transforms use when + // upgrades them to the latest version. + // It also cleans up the internal data structures that store the transform state + // and checkpoints. + // The upgrade does not affect the source and destination indices. + // The upgrade also does not affect the roles that transforms use when // Elasticsearch security features are enabled; the role used to read source - // data and write to the destination index - // remains unchanged. + // data and write to the destination index remains unchanged. + // + // If a transform upgrade step fails, the upgrade stops and an error is returned + // about the underlying issue. + // Resolve the issue then re-run the process again. + // A summary is returned when the upgrade is finished. + // + // To ensure continuous transforms remain running during a major version upgrade + // of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade + // transforms before upgrading the cluster. + // You may want to perform a recent cluster backup prior to the upgrade. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/upgrade-transforms.html UpgradeTransforms transform_upgrade_transforms.NewUpgradeTransforms } type Watcher struct { - // Acknowledges a watch, manually throttling the execution of the watch's - // actions. + // Acknowledge a watch. + // Acknowledging a watch enables you to manually throttle the execution of the + // watch's actions. + // + // The acknowledgement state of an action is stored in the + // `status.actions.<id>.ack.state` structure. + // + // IMPORTANT: If the specified watch is currently being executed, this API will + // return an error. + // The reason for this behavior is to prevent overwriting the watch status from + // a watch execution. + // + // Acknowledging an action throttles further executions of that action until its + // `ack.state` is reset to `awaits_successful_execution`. + // This happens when the condition of the watch is not met (the condition + // evaluates to false). // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html AckWatch watcher_ack_watch.NewAckWatch - // Activates a currently inactive watch. + // Activate a watch. + // A watch can be either active or inactive. // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-activate-watch.html ActivateWatch watcher_activate_watch.NewActivateWatch - // Deactivates a currently active watch. + // Deactivate a watch. + // A watch can be either active or inactive. // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deactivate-watch.html DeactivateWatch watcher_deactivate_watch.NewDeactivateWatch - // Removes a watch from Watcher. + // Delete a watch. + // When the watch is removed, the document representing the watch in the + // `.watches` index is gone and it will never be run again. + // + // Deleting a watch does not delete any watch execution records related to this + // watch from the watch history.
+ // + // IMPORTANT: Deleting a watch must be done by using only this API. + // Do not delete the watch directly from the `.watches` index using the + // Elasticsearch delete document API. + // When Elasticsearch security features are enabled, make sure no write + // privileges are granted to anyone for the `.watches` index. // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html DeleteWatch watcher_delete_watch.NewDeleteWatch + // Run a watch. // This API can be used to force execution of the watch outside of its // triggering logic or to simulate the watch execution for debugging purposes. + // // For testing and debugging purposes, you also have fine-grained control on how - // the watch runs. You can execute the watch without executing all of its - // actions or alternatively by simulating them. You can also force execution by - // ignoring the watch condition and control whether a watch record would be - // written to the watch history after execution. + // the watch runs. + // You can run the watch without running all of its actions or alternatively by + // simulating them. + // You can also force execution by ignoring the watch condition and control + // whether a watch record would be written to the watch history after it runs. + // + // You can use the run watch API to run watches that are not yet registered by + // specifying the watch definition inline. + // This serves as a great tool for testing and debugging your watches prior to + // adding them to Watcher. + // + // When Elasticsearch security features are enabled on your cluster, watches are + // run with the privileges of the user that stored the watches. + // If your user is allowed to read index `a`, but not index `b`, then the exact + // same set of rules will apply during execution of a watch.
+ // + // When using the run watch API, the authorization data of the user that called + // the API will be used as a base, instead of the information who stored the + // watch. // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html ExecuteWatch watcher_execute_watch.NewExecuteWatch - // Retrieve settings for the watcher system index + // Get Watcher index settings. + // Get settings for the Watcher internal index (`.watches`). + // Only a subset of settings are shown, for example `index.auto_expand_replicas` + // and `index.number_of_replicas`. // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-settings.html GetSettings watcher_get_settings.NewGetSettings - // Retrieves a watch by its ID. + // Get a watch. // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-watch.html GetWatch watcher_get_watch.NewGetWatch - // Creates a new watch, or updates an existing one. + // Create or update a watch. + // When a watch is registered, a new document that represents the watch is added + // to the `.watches` index and its trigger is immediately registered with the + // relevant trigger engine. + // Typically for the `schedule` trigger, the scheduler is the trigger engine. + // + // IMPORTANT: You must use Kibana or this API to create a watch. + // Do not add a watch directly to the `.watches` index by using the + // Elasticsearch index API. + // If Elasticsearch security features are enabled, do not give users write + // privileges on the `.watches` index. + // + // When you add a watch you can also define its initial active state by setting + // the *active* parameter. + // + // When Elasticsearch security features are enabled, your watch can index or + // search only on indices for which the user that stored the watch has + // privileges. + // If the user is able to read index `a`, but not index `b`, the same will apply + // when the watch runs. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html PutWatch watcher_put_watch.NewPutWatch - // Retrieves stored watches. + // Query watches. + // Get all registered watches in a paginated manner and optionally filter + // watches by a query. + // + // Note that only the `_id` and `metadata.*` fields are queryable or sortable. // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-query-watches.html QueryWatches watcher_query_watches.NewQueryWatches - // Starts Watcher if it is not already running. + // Start the watch service. + // Start the Watcher service if it is not already running. // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-start.html Start watcher_start.NewStart - // Retrieves the current Watcher metrics. + // Get Watcher statistics. + // This API always returns basic metrics. + // You retrieve more metrics by using the metric parameter. // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html Stats watcher_stats.NewStats - // Stops Watcher if it is running. + // Stop the watch service. + // Stop the Watcher service if it is running. // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop.html Stop watcher_stop.NewStop - // Update settings for the watcher system index + // Update Watcher index settings. + // Update settings for the Watcher internal index (`.watches`). + // Only a subset of settings can be modified. + // This includes `index.auto_expand_replicas` and `index.number_of_replicas`. // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-update-settings.html UpdateSettings watcher_update_settings.NewUpdateSettings } type Xpack struct { - // Provides general information about the installed X-Pack features. + // Get information. + // The information provided by the API includes: + // + // * Build information including the build number and timestamp. 
+ // * License information about the currently installed license. + // * Feature information for the features that are currently enabled and + // available under the current license. // https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html Info xpack_info.NewInfo - // This API provides information about which features are currently enabled and - // available under the current license and some usage statistics. + // Get usage information. + // Get information about the features that are currently enabled and available + // under the current license. + // The API also provides some usage statistics. // https://www.elastic.co/guide/en/elasticsearch/reference/current/usage-api.html Usage xpack_usage.NewUsage } @@ -3136,6 +8250,7 @@ type API struct { SearchableSnapshots SearchableSnapshots Security Security Shutdown Shutdown + Simulate Simulate Slm Slm Snapshot Snapshot Sql Sql @@ -3148,112 +8263,1034 @@ type API struct { Xpack Xpack // Bulk index or delete documents. - // Performs multiple indexing or delete operations in a single API call. + // Perform multiple `index`, `create`, `delete`, and `update` actions in a + // single request. // This reduces overhead and can greatly increase indexing speed. + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or index alias: + // + // * To use the `create` action, you must have the `create_doc`, `create`, + // `index`, or `write` index privilege. Data streams support only the `create` + // action. + // * To use the `index` action, you must have the `create`, `index`, or `write` + // index privilege. + // * To use the `delete` action, you must have the `delete` or `write` index + // privilege. + // * To use the `update` action, you must have the `index` or `write` index + // privilege. 
+ // * To automatically create a data stream or index with a bulk API request, you + // must have the `auto_configure`, `create_index`, or `manage` index privilege. + // * To make the result of a bulk operation visible to search using the + // `refresh` parameter, you must have the `maintenance` or `manage` index + // privilege. + // + // Automatic data stream creation requires a matching index template with data + // stream enabled. + // + // The actions are specified in the request body using a newline delimited JSON + // (NDJSON) structure: + // + // ``` + // action_and_meta_data\n + // optional_source\n + // action_and_meta_data\n + // optional_source\n + // .... + // action_and_meta_data\n + // optional_source\n + // ``` + // + // The `index` and `create` actions expect a source on the next line and have + // the same semantics as the `op_type` parameter in the standard index API. + // A `create` action fails if a document with the same ID already exists in the + // target + // An `index` action adds or replaces a document as necessary. + // + // NOTE: Data streams support only the `create` action. + // To update or delete a document in a data stream, you must target the backing + // index containing the document. + // + // An `update` action expects that the partial doc, upsert, and script and its + // options are specified on the next line. + // + // A `delete` action does not expect a source on the next line and has the same + // semantics as the standard delete API. + // + // NOTE: The final line of data must end with a newline character (`\n`). + // Each newline character may be preceded by a carriage return (`\r`). + // When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header + // of `application/json` or `application/x-ndjson`. + // Because this format uses literal newline characters (`\n`) as delimiters, + // make sure that the JSON actions and sources are not pretty printed. 
+	//
+	// If you provide a target in the request path, it is used for any actions that
+	// don't explicitly specify an `_index` argument.
+	//
+	// A note on the format: the idea here is to make processing as fast as
+	// possible.
+	// As some of the actions are redirected to other shards on other nodes, only
+	// `action_meta_data` is parsed on the receiving node side.
+	//
+	// Client libraries using this protocol should try and strive to do something
+	// similar on the client side, and reduce buffering as much as possible.
+	//
+	// There is no "correct" number of actions to perform in a single bulk request.
+	// Experiment with different settings to find the optimal size for your
+	// particular workload.
+	// Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by
+	// default so clients must ensure that no request exceeds this size.
+	// It is not possible to index a single document that exceeds the size limit, so
+	// you must pre-process any such documents into smaller pieces before sending
+	// them to Elasticsearch.
+	// For instance, split documents into pages or chapters before indexing them, or
+	// store raw binary data in a system outside Elasticsearch and replace the raw
+	// data with a link to the external system in the documents that you send to
+	// Elasticsearch.
+	//
+	// **Client support for bulk requests**
+	//
+	// Some of the officially supported clients provide helpers to assist with bulk
+	// requests and reindexing:
+	//
+	// * Go: Check out `esutil.BulkIndexer`
+	// * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and
+	// `Search::Elasticsearch::Client::5_0::Scroll`
+	// * Python: Check out `elasticsearch.helpers.*`
+	// * JavaScript: Check out `client.helpers.*`
+	// * .NET: Check out `BulkAllObservable`
+	// * PHP: Check out bulk indexing.
+	//
+	// **Submitting bulk requests with cURL**
+	//
+	// If you're providing text file input to `curl`, you must use the
+	// `--data-binary` flag instead of plain `-d`.
+	// The latter doesn't preserve newlines. For example:
+	//
+	// ```
+	// $ cat requests
+	// { "index" : { "_index" : "test", "_id" : "1" } }
+	// { "field1" : "value1" }
+	// $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk
+	// --data-binary "@requests"; echo
+	// {"took":7, "errors": false,
+	// "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+	// ```
+	//
+	// **Optimistic concurrency control**
+	//
+	// Each `index` and `delete` action within a bulk API call may include the
+	// `if_seq_no` and `if_primary_term` parameters in their respective action and
+	// meta data lines.
+	// The `if_seq_no` and `if_primary_term` parameters control how operations are
+	// run, based on the last modification to existing documents. See Optimistic
+	// concurrency control for more details.
+	//
+	// **Versioning**
+	//
+	// Each bulk item can include the version value using the `version` field.
+	// It automatically follows the behavior of the index or delete operation based
+	// on the `_version` mapping.
+	// It also supports the `version_type`.
+	//
+	// **Routing**
+	//
+	// Each bulk item can include the routing value using the `routing` field.
+	// It automatically follows the behavior of the index or delete operation based
+	// on the `_routing` mapping.
+	//
+	// NOTE: Data streams do not support custom routing unless they were created
+	// with the `allow_custom_routing` setting enabled in the template.
+	//
+	// **Wait for active shards**
+	//
+	// When making bulk calls, you can set the `wait_for_active_shards` parameter to
+	// require a minimum number of shard copies to be active before starting to
+	// process the bulk request.
+ // + // **Refresh** + // + // Control when the changes made by this request are visible to search. + // + // NOTE: Only the shards that receive the bulk request will be affected by + // refresh. + // Imagine a `_bulk?refresh=wait_for` request with three documents in it that + // happen to be routed to different shards in an index with five shards. + // The request will only wait for those three shards to refresh. + // The other two shards that make up the index do not participate in the `_bulk` + // request at all. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html Bulk core_bulk.NewBulk - // Clears the search context and results for a scrolling search. + // Clear a scrolling search. + // Clear the search context and results for a scrolling search. // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html ClearScroll core_clear_scroll.NewClearScroll - // Closes a point-in-time. + // Close a point in time. + // A point in time must be opened explicitly before being used in search + // requests. + // The `keep_alive` parameter tells Elasticsearch how long it should persist. + // A point in time is automatically closed when the `keep_alive` period has + // elapsed. + // However, keeping points in time has a cost; close them as soon as they are no + // longer required for search requests. // https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html ClosePointInTime core_close_point_in_time.NewClosePointInTime - // Returns number of documents matching a query. + // Count search results. + // Get the number of documents matching a query. + // + // The query can be provided either by using a simple query string as a + // parameter, or by defining Query DSL within the request body. + // The query is optional. When no query is provided, the API uses `match_all` to + // count all the documents. + // + // The count API supports multi-target syntax. 
You can run a single count API + // search across multiple data streams and indices. + // + // The operation is broadcast across all shards. + // For each shard ID group, a replica is chosen and the search is run against + // it. + // This means that replicas increase the scalability of the count. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html Count core_count.NewCount - // Index a document. - // Adds a JSON document to the specified data stream or index and makes it - // searchable. - // If the target is an index and the document already exists, the request - // updates the document and increments its version. + // Create a new document in the index. + // + // You can index a new JSON document with the `//_doc/` or + // `//_create/<_id>` APIs + // Using `_create` guarantees that the document is indexed only if it does not + // already exist. + // It returns a 409 response when a document with a same ID already exists in + // the index. + // To update an existing document, you must use the `//_doc/` API. + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or index alias: + // + // * To add a document using the `PUT //_create/<_id>` or `POST + // //_create/<_id>` request formats, you must have the `create_doc`, + // `create`, `index`, or `write` index privilege. + // * To automatically create a data stream or index with this API request, you + // must have the `auto_configure`, `create_index`, or `manage` index privilege. + // + // Automatic data stream creation requires a matching index template with data + // stream enabled. + // + // **Automatically create data streams and indices** + // + // If the request's target doesn't exist and matches an index template with a + // `data_stream` definition, the index operation automatically creates the data + // stream. 
+ // + // If the target doesn't exist and doesn't match a data stream template, the + // operation automatically creates the index and applies any matching index + // templates. + // + // NOTE: Elasticsearch includes several built-in index templates. To avoid + // naming collisions with these templates, refer to index pattern documentation. + // + // If no mapping exists, the index operation creates a dynamic mapping. + // By default, new fields and objects are automatically added to the mapping if + // needed. + // + // Automatic index creation is controlled by the `action.auto_create_index` + // setting. + // If it is `true`, any index can be created automatically. + // You can modify this setting to explicitly allow or block automatic creation + // of indices that match specified patterns or set it to `false` to turn off + // automatic index creation entirely. + // Specify a comma-separated list of patterns you want to allow or prefix each + // pattern with `+` or `-` to indicate whether it should be allowed or blocked. + // When a list is specified, the default behaviour is to disallow. + // + // NOTE: The `action.auto_create_index` setting affects the automatic creation + // of indices only. + // It does not affect the creation of data streams. + // + // **Routing** + // + // By default, shard placement — or routing — is controlled by using a hash of + // the document's ID value. + // For more explicit control, the value fed into the hash function used by the + // router can be directly specified on a per-operation basis using the `routing` + // parameter. + // + // When setting up explicit mapping, you can also use the `_routing` field to + // direct the index operation to extract the routing value from the document + // itself. + // This does come at the (very minimal) cost of an additional document parsing + // pass. 
+	// If the `_routing` mapping is defined and set to be required, the index
+	// operation will fail if no routing value is provided or extracted.
+	//
+	// NOTE: Data streams do not support custom routing unless they were created
+	// with the `allow_custom_routing` setting enabled in the template.
+	//
+	// **Distributed**
+	//
+	// The index operation is directed to the primary shard based on its route and
+	// performed on the actual node containing this shard.
+	// After the primary shard completes the operation, if needed, the update is
+	// distributed to applicable replicas.
+	//
+	// **Active shards**
+	//
+	// To improve the resiliency of writes to the system, indexing operations can be
+	// configured to wait for a certain number of active shard copies before
+	// proceeding with the operation.
+	// If the requisite number of active shard copies are not available, then the
+	// write operation must wait and retry, until either the requisite shard copies
+	// have started or a timeout occurs.
+	// By default, write operations only wait for the primary shards to be active
+	// before proceeding (that is to say `wait_for_active_shards` is `1`).
+	// This default can be overridden in the index settings dynamically by setting
+	// `index.write.wait_for_active_shards`.
+	// To alter this behavior per operation, use the `wait_for_active_shards`
+	// request parameter.
+	//
+	// Valid values are all or any positive integer up to the total number of
+	// configured copies per shard in the index (which is `number_of_replicas`+1).
+	// Specifying a negative value or a number greater than the number of shard
+	// copies will throw an error.
+	//
+	// For example, suppose you have a cluster of three nodes, A, B, and C and you
+	// create an index index with the number of replicas set to 3 (resulting in 4
+	// shard copies, one more copy than there are nodes).
+ // If you attempt an indexing operation, by default the operation will only + // ensure the primary copy of each shard is available before proceeding. + // This means that even if B and C went down and A hosted the primary shard + // copies, the indexing operation would still proceed with only one copy of the + // data. + // If `wait_for_active_shards` is set on the request to `3` (and all three nodes + // are up), the indexing operation will require 3 active shard copies before + // proceeding. + // This requirement should be met because there are 3 active nodes in the + // cluster, each one holding a copy of the shard. + // However, if you set `wait_for_active_shards` to `all` (or to `4`, which is + // the same in this situation), the indexing operation will not proceed as you + // do not have all 4 copies of each shard active in the index. + // The operation will timeout unless a new node is brought up in the cluster to + // host the fourth copy of the shard. + // + // It is important to note that this setting greatly reduces the chances of the + // write operation not writing to the requisite number of shard copies, but it + // does not completely eliminate the possibility, because this check occurs + // before the write operation starts. + // After the write operation is underway, it is still possible for replication + // to fail on any number of shard copies but still succeed on the primary. + // The `_shards` section of the API response reveals the number of shard copies + // on which replication succeeded and failed. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html Create core_create.NewCreate // Delete a document. - // Removes a JSON document from the specified index. + // + // Remove a JSON document from the specified index. + // + // NOTE: You cannot send deletion requests directly to a data stream. + // To delete a document in a data stream, you must target the backing index + // containing the document. 
+ // + // **Optimistic concurrency control** + // + // Delete operations can be made conditional and only be performed if the last + // modification to the document was assigned the sequence number and primary + // term specified by the `if_seq_no` and `if_primary_term` parameters. + // If a mismatch is detected, the operation will result in a + // `VersionConflictException` and a status code of `409`. + // + // **Versioning** + // + // Each document indexed is versioned. + // When deleting a document, the version can be specified to make sure the + // relevant document you are trying to delete is actually being deleted and it + // has not changed in the meantime. + // Every write operation run on a document, deletes included, causes its version + // to be incremented. + // The version number of a deleted document remains available for a short time + // after deletion to allow for control of concurrent operations. + // The length of time for which a deleted document's version remains available + // is determined by the `index.gc_deletes` index setting. + // + // **Routing** + // + // If routing is used during indexing, the routing value also needs to be + // specified to delete a document. + // + // If the `_routing` mapping is set to `required` and no routing value is + // specified, the delete API throws a `RoutingMissingException` and rejects the + // request. + // + // For example: + // + // ``` + // DELETE /my-index-000001/_doc/1?routing=shard-1 + // ``` + // + // This request deletes the document with ID 1, but it is routed based on the + // user. + // The document is not deleted if the correct routing is not specified. + // + // **Distributed** + // + // The delete operation gets hashed into a specific shard ID. + // It then gets redirected into the primary shard within that ID group and + // replicated (if needed) to shard replicas within that ID group. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html Delete core_delete.NewDelete // Delete documents. + // // Deletes documents that match the specified query. + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or alias: + // + // * `read` + // * `delete` or `write` + // + // You can specify the query criteria in the request URI or the request body + // using the same syntax as the search API. + // When you submit a delete by query request, Elasticsearch gets a snapshot of + // the data stream or index when it begins processing the request and deletes + // matching documents using internal versioning. + // If a document changes between the time that the snapshot is taken and the + // delete operation is processed, it results in a version conflict and the + // delete operation fails. + // + // NOTE: Documents with a version equal to 0 cannot be deleted using delete by + // query because internal versioning does not support 0 as a valid version + // number. + // + // While processing a delete by query request, Elasticsearch performs multiple + // search requests sequentially to find all of the matching documents to delete. + // A bulk delete request is performed for each batch of matching documents. + // If a search or bulk request is rejected, the requests are retried up to 10 + // times, with exponential back off. + // If the maximum retry limit is reached, processing halts and all failed + // requests are returned in the response. + // Any delete requests that completed successfully still stick, they are not + // rolled back. + // + // You can opt to count version conflicts instead of halting and returning by + // setting `conflicts` to `proceed`. 
+ // Note that if you opt to count version conflicts the operation could attempt + // to delete more documents from the source than `max_docs` until it has + // successfully deleted `max_docs documents`, or it has gone through every + // document in the source query. + // + // **Throttling delete requests** + // + // To control the rate at which delete by query issues batches of delete + // operations, you can set `requests_per_second` to any positive decimal number. + // This pads each batch with a wait time to throttle the rate. + // Set `requests_per_second` to `-1` to disable throttling. + // + // Throttling uses a wait time between batches so that the internal scroll + // requests can be given a timeout that takes the request padding into account. + // The padding time is the difference between the batch size divided by the + // `requests_per_second` and the time spent writing. + // By default the batch size is `1000`, so if `requests_per_second` is set to + // `500`: + // + // ``` + // target_time = 1000 / 500 per second = 2 seconds + // wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds + // ``` + // + // Since the batch is issued as a single `_bulk` request, large batch sizes + // cause Elasticsearch to create many requests and wait before starting the next + // set. + // This is "bursty" instead of "smooth". + // + // **Slicing** + // + // Delete by query supports sliced scroll to parallelize the delete process. + // This can improve efficiency and provide a convenient way to break the request + // down into smaller parts. + // + // Setting `slices` to `auto` lets Elasticsearch choose the number of slices to + // use. + // This setting will use one slice per shard, up to a certain limit. + // If there are multiple source data streams or indices, it will choose the + // number of slices based on the index or backing index with the smallest number + // of shards. 
+ // Adding slices to the delete by query operation creates sub-requests which + // means it has some quirks: + // + // * You can see these requests in the tasks APIs. These sub-requests are + // "child" tasks of the task for the request with slices. + // * Fetching the status of the task for the request with slices only contains + // the status of completed slices. + // * These sub-requests are individually addressable for things like + // cancellation and rethrottling. + // * Rethrottling the request with `slices` will rethrottle the unfinished + // sub-request proportionally. + // * Canceling the request with `slices` will cancel each sub-request. + // * Due to the nature of `slices` each sub-request won't get a perfectly even + // portion of the documents. All documents will be addressed, but some slices + // may be larger than others. Expect larger slices to have a more even + // distribution. + // * Parameters like `requests_per_second` and `max_docs` on a request with + // `slices` are distributed proportionally to each sub-request. Combine that + // with the earlier point about distribution being uneven and you should + // conclude that using `max_docs` with `slices` might not result in exactly + // `max_docs` documents being deleted. + // * Each sub-request gets a slightly different snapshot of the source data + // stream or index though these are all taken at approximately the same time. + // + // If you're slicing manually or otherwise tuning automatic slicing, keep in + // mind that: + // + // * Query performance is most efficient when the number of slices is equal to + // the number of shards in the index or backing index. If that number is large + // (for example, 500), choose a lower number as too many `slices` hurts + // performance. Setting `slices` higher than the number of shards generally does + // not improve efficiency and adds overhead. + // * Delete performance scales linearly across available resources with the + // number of slices. 
+ // + // Whether query or delete performance dominates the runtime depends on the + // documents being reindexed and cluster resources. + // + // **Cancel a delete by query operation** + // + // Any delete by query can be canceled using the task cancel API. For example: + // + // ``` + // POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel + // ``` + // + // The task ID can be found by using the get tasks API. + // + // Cancellation should happen quickly but might take a few seconds. + // The get task status API will continue to list the delete by query task until + // this task checks that it has been cancelled and terminates itself. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html DeleteByQuery core_delete_by_query.NewDeleteByQuery - // Changes the number of requests per second for a particular Delete By Query + // Throttle a delete by query operation. + // + // Change the number of requests per second for a particular delete by query // operation. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html + // Rethrottling that speeds up the query takes effect immediately but + // rethrotting that slows down the query takes effect after completing the + // current batch to prevent scroll timeouts. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html#docs-delete-by-query-rethrottle DeleteByQueryRethrottle core_delete_by_query_rethrottle.NewDeleteByQueryRethrottle // Delete a script or search template. // Deletes a stored script or search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-stored-script-api.html DeleteScript core_delete_script.NewDeleteScript // Check a document. - // Checks if a specified document exists. + // + // Verify that a document exists. 
+ // For example, check to see if a document with the `_id` 0 exists: + // + // ``` + // HEAD my-index-000001/_doc/0 + // ``` + // + // If the document exists, the API returns a status code of `200 - OK`. + // If the document doesn’t exist, the API returns `404 - Not Found`. + // + // **Versioning support** + // + // You can use the `version` parameter to check the document only if its current + // version is equal to the specified one. + // + // Internally, Elasticsearch has marked the old document as deleted and added an + // entirely new document. + // The old version of the document doesn't disappear immediately, although you + // won't be able to access it. + // Elasticsearch cleans up deleted documents in the background as you continue + // to index more data. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html Exists core_exists.NewExists // Check for a document source. - // Checks if a document's `_source` is stored. + // + // Check whether a document source exists in an index. + // For example: + // + // ``` + // HEAD my-index-000001/_source/1 + // ``` + // + // A document's source is not available if it is disabled in the mapping. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html ExistsSource core_exists_source.NewExistsSource // Explain a document match result. - // Returns information about why a specific document matches, or doesn’t match, - // a query. + // Get information about why a specific document matches, or doesn't match, a + // query. + // It computes a score explanation for a query and a specific document. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-explain.html Explain core_explain.NewExplain - // The field capabilities API returns the information about the capabilities of - // fields among multiple indices. - // The field capabilities API returns runtime fields like any other field. 
For - // example, a runtime field with a type - // of keyword is returned as any other field that belongs to the `keyword` - // family. + // Get the field capabilities. + // + // Get information about the capabilities of fields among multiple indices. + // + // For data streams, the API returns field capabilities among the stream’s + // backing indices. + // It returns runtime fields like any other field. + // For example, a runtime field with a type of keyword is returned the same as + // any other field that belongs to the `keyword` family. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html FieldCaps core_field_caps.NewFieldCaps // Get a document by its ID. - // Retrieves the document with the specified ID from an index. + // + // Get a document and its source or stored fields from an index. + // + // By default, this API is realtime and is not affected by the refresh rate of + // the index (when data will become visible for search). + // In the case where stored fields are requested with the `stored_fields` + // parameter and the document has been updated but is not yet refreshed, the API + // will have to parse and analyze the source to extract the stored fields. + // To turn off realtime behavior, set the `realtime` parameter to false. + // + // **Source filtering** + // + // By default, the API returns the contents of the `_source` field unless you + // have used the `stored_fields` parameter or the `_source` field is turned off. + // You can turn off `_source` retrieval by using the `_source` parameter: + // + // ``` + // GET my-index-000001/_doc/0?_source=false + // ``` + // + // If you only need one or two fields from the `_source`, use the + // `_source_includes` or `_source_excludes` parameters to include or filter out + // particular fields. 
+ // This can be helpful with large documents where partial retrieval can save on + // network overhead + // Both parameters take a comma separated list of fields or wildcard + // expressions. + // For example: + // + // ``` + // GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities + // ``` + // + // If you only want to specify includes, you can use a shorter notation: + // + // ``` + // GET my-index-000001/_doc/0?_source=*.id + // ``` + // + // **Routing** + // + // If routing is used during indexing, the routing value also needs to be + // specified to retrieve a document. + // For example: + // + // ``` + // GET my-index-000001/_doc/2?routing=user1 + // ``` + // + // This request gets the document with ID 2, but it is routed based on the user. + // The document is not fetched if the correct routing is not specified. + // + // **Distributed** + // + // The GET operation is hashed into a specific shard ID. + // It is then redirected to one of the replicas within that shard ID and returns + // the result. + // The replicas are the primary shard and its replicas within that shard ID + // group. + // This means that the more replicas you have, the better your GET scaling will + // be. + // + // **Versioning support** + // + // You can use the `version` parameter to retrieve the document only if its + // current version is equal to the specified one. + // + // Internally, Elasticsearch has marked the old document as deleted and added an + // entirely new document. + // The old version of the document doesn't disappear immediately, although you + // won't be able to access it. + // Elasticsearch cleans up deleted documents in the background as you continue + // to index more data. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html Get core_get.NewGet // Get a script or search template. // Retrieves a stored script or search template. 
- // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-stored-script-api.html GetScript core_get_script.NewGetScript - // Returns all script contexts. - // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-contexts.html + // Get script contexts. + // + // Get a list of supported script contexts and their methods. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-script-contexts-api.html GetScriptContext core_get_script_context.NewGetScriptContext - // Returns available script types, languages and contexts - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // Get script languages. + // + // Get a list of available script types, languages, and contexts. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-script-languages-api.html GetScriptLanguages core_get_script_languages.NewGetScriptLanguages // Get a document's source. - // Returns the source of a document. + // + // Get the source of a document. + // For example: + // + // ``` + // GET my-index-000001/_source/1 + // ``` + // + // You can use the source filtering parameters to control which parts of the + // `_source` are returned: + // + // ``` + // GET + // my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities + // ``` // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html GetSource core_get_source.NewGetSource - // Returns the health of the cluster. + // Get the cluster health. + // Get a report with the health status of an Elasticsearch cluster. + // The report contains a list of indicators that compose Elasticsearch + // functionality. + // + // Each indicator has a health status of: green, unknown, yellow or red. + // The indicator will provide an explanation and metadata describing the reason + // for its current health status. 
+ // + // The cluster’s status is controlled by the worst indicator status. + // + // In the event that an indicator’s status is non-green, a list of impacts may + // be present in the indicator result which detail the functionalities that are + // negatively affected by the health issue. + // Each impact carries with it a severity level, an area of the system that is + // affected, and a simple description of the impact on the system. + // + // Some health indicators can determine the root cause of a health problem and + // prescribe a set of steps that can be performed in order to improve the health + // of the system. + // The root cause and remediation steps are encapsulated in a diagnosis. + // A diagnosis contains a cause detailing a root cause analysis, an action + // containing a brief description of the steps to take to fix the problem, the + // list of affected resources (if applicable), and a detailed step-by-step + // troubleshooting guide to fix the diagnosed problem. + // + // NOTE: The health indicators perform root cause analysis of non-green health + // statuses. This can be computationally expensive when called frequently. + // When setting up automated polling of the API for health status, set verbose + // to false to disable the more expensive analysis logic. // https://www.elastic.co/guide/en/elasticsearch/reference/current/health-api.html HealthReport core_health_report.NewHealthReport - // Index a document. - // Adds a JSON document to the specified data stream or index and makes it + // Create or update a document in an index. + // + // Add a JSON document to the specified data stream or index and make it // searchable. // If the target is an index and the document already exists, the request // updates the document and increments its version. + // + // NOTE: You cannot use this API to send update requests for existing documents + // in a data stream. 
+ //
+ // If the Elasticsearch security features are enabled, you must have the
+ // following index privileges for the target data stream, index, or index alias:
+ //
+ // * To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request
+ // format, you must have the `create`, `index`, or `write` index privilege.
+ // * To add a document using the `POST /<target>/_doc/` request format, you must
+ // have the `create_doc`, `create`, `index`, or `write` index privilege.
+ // * To automatically create a data stream or index with this API request, you
+ // must have the `auto_configure`, `create_index`, or `manage` index privilege.
+ //
+ // Automatic data stream creation requires a matching index template with data
+ // stream enabled.
+ //
+ // NOTE: Replica shards might not all be started when an indexing operation
+ // returns successfully.
+ // By default, only the primary is required. Set `wait_for_active_shards` to
+ // change this default behavior.
+ //
+ // **Automatically create data streams and indices**
+ //
+ // If the request's target doesn't exist and matches an index template with a
+ // `data_stream` definition, the index operation automatically creates the data
+ // stream.
+ //
+ // If the target doesn't exist and doesn't match a data stream template, the
+ // operation automatically creates the index and applies any matching index
+ // templates.
+ //
+ // NOTE: Elasticsearch includes several built-in index templates. To avoid
+ // naming collisions with these templates, refer to index pattern documentation.
+ //
+ // If no mapping exists, the index operation creates a dynamic mapping.
+ // By default, new fields and objects are automatically added to the mapping if
+ // needed.
+ //
+ // Automatic index creation is controlled by the `action.auto_create_index`
+ // setting.
+ // If it is `true`, any index can be created automatically.
+ // You can modify this setting to explicitly allow or block automatic creation + // of indices that match specified patterns or set it to `false` to turn off + // automatic index creation entirely. + // Specify a comma-separated list of patterns you want to allow or prefix each + // pattern with `+` or `-` to indicate whether it should be allowed or blocked. + // When a list is specified, the default behaviour is to disallow. + // + // NOTE: The `action.auto_create_index` setting affects the automatic creation + // of indices only. + // It does not affect the creation of data streams. + // + // **Optimistic concurrency control** + // + // Index operations can be made conditional and only be performed if the last + // modification to the document was assigned the sequence number and primary + // term specified by the `if_seq_no` and `if_primary_term` parameters. + // If a mismatch is detected, the operation will result in a + // `VersionConflictException` and a status code of `409`. + // + // **Routing** + // + // By default, shard placement — or routing — is controlled by using a hash of + // the document's ID value. + // For more explicit control, the value fed into the hash function used by the + // router can be directly specified on a per-operation basis using the `routing` + // parameter. + // + // When setting up explicit mapping, you can also use the `_routing` field to + // direct the index operation to extract the routing value from the document + // itself. + // This does come at the (very minimal) cost of an additional document parsing + // pass. + // If the `_routing` mapping is defined and set to be required, the index + // operation will fail if no routing value is provided or extracted. + // + // NOTE: Data streams do not support custom routing unless they were created + // with the `allow_custom_routing` setting enabled in the template. 
+ // + // **Distributed** + // + // The index operation is directed to the primary shard based on its route and + // performed on the actual node containing this shard. + // After the primary shard completes the operation, if needed, the update is + // distributed to applicable replicas. + // + // **Active shards** + // + // To improve the resiliency of writes to the system, indexing operations can be + // configured to wait for a certain number of active shard copies before + // proceeding with the operation. + // If the requisite number of active shard copies are not available, then the + // write operation must wait and retry, until either the requisite shard copies + // have started or a timeout occurs. + // By default, write operations only wait for the primary shards to be active + // before proceeding (that is to say `wait_for_active_shards` is `1`). + // This default can be overridden in the index settings dynamically by setting + // `index.write.wait_for_active_shards`. + // To alter this behavior per operation, use the `wait_for_active_shards + // request` parameter. + // + // Valid values are all or any positive integer up to the total number of + // configured copies per shard in the index (which is `number_of_replicas`+1). + // Specifying a negative value or a number greater than the number of shard + // copies will throw an error. + // + // For example, suppose you have a cluster of three nodes, A, B, and C and you + // create an index index with the number of replicas set to 3 (resulting in 4 + // shard copies, one more copy than there are nodes). + // If you attempt an indexing operation, by default the operation will only + // ensure the primary copy of each shard is available before proceeding. + // This means that even if B and C went down and A hosted the primary shard + // copies, the indexing operation would still proceed with only one copy of the + // data. 
+ // If `wait_for_active_shards` is set on the request to `3` (and all three nodes + // are up), the indexing operation will require 3 active shard copies before + // proceeding. + // This requirement should be met because there are 3 active nodes in the + // cluster, each one holding a copy of the shard. + // However, if you set `wait_for_active_shards` to `all` (or to `4`, which is + // the same in this situation), the indexing operation will not proceed as you + // do not have all 4 copies of each shard active in the index. + // The operation will timeout unless a new node is brought up in the cluster to + // host the fourth copy of the shard. + // + // It is important to note that this setting greatly reduces the chances of the + // write operation not writing to the requisite number of shard copies, but it + // does not completely eliminate the possibility, because this check occurs + // before the write operation starts. + // After the write operation is underway, it is still possible for replication + // to fail on any number of shard copies but still succeed on the primary. + // The `_shards` section of the API response reveals the number of shard copies + // on which replication succeeded and failed. + // + // **No operation (noop) updates** + // + // When updating a document by using this API, a new version of the document is + // always created even if the document hasn't changed. + // If this isn't acceptable use the `_update` API with `detect_noop` set to + // `true`. + // The `detect_noop` option isn't available on this API because it doesn’t fetch + // the old source and isn't able to compare it against the new source. + // + // There isn't a definitive rule for when noop updates aren't acceptable. + // It's a combination of lots of factors like how frequently your data source + // sends updates that are actually noops and how many queries per second + // Elasticsearch runs on the shard receiving the updates. 
+ // + // **Versioning** + // + // Each indexed document is given a version number. + // By default, internal versioning is used that starts at 1 and increments with + // each update, deletes included. + // Optionally, the version number can be set to an external value (for example, + // if maintained in a database). + // To enable this functionality, `version_type` should be set to `external`. + // The value provided must be a numeric, long value greater than or equal to 0, + // and less than around `9.2e+18`. + // + // NOTE: Versioning is completely real time, and is not affected by the near + // real time aspects of search operations. + // If no version is provided, the operation runs without any version checks. + // + // When using the external version type, the system checks to see if the version + // number passed to the index request is greater than the version of the + // currently stored document. + // If true, the document will be indexed and the new version number used. + // If the value provided is less than or equal to the stored document's version + // number, a version conflict will occur and the index operation will fail. For + // example: + // + // ``` + // PUT my-index-000001/_doc/1?version=2&version_type=external + // { + // "user": { + // "id": "elkbee" + // } + // } + // + // In this example, the operation will succeed since the supplied version of 2 + // is higher than the current document version of 1. + // If the document was already updated and its version was set to 2 or higher, + // the indexing command will fail and result in a conflict (409 HTTP status + // code). + // + // A nice side effect is that there is no need to maintain strict ordering of + // async indexing operations run as a result of changes to a source database, as + // long as version numbers from the source database are used. 
+ // Even the simple case of updating the Elasticsearch index using data from a + // database is simplified if external versioning is used, as only the latest + // version will be used if the index operations arrive out of order. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html Index core_index.NewIndex // Get cluster info. - // Returns basic information about the cluster. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html + // Get basic build, version, and cluster information. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/rest-api-root.html Info core_info.NewInfo - // Performs a kNN search. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html + // Run a knn search. + // + // NOTE: The kNN search API has been replaced by the `knn` option in the search + // API. + // + // Perform a k-nearest neighbor (kNN) search on a dense_vector field and return + // the matching documents. + // Given a query vector, the API finds the k closest vectors and returns those + // documents as search hits. + // + // Elasticsearch uses the HNSW algorithm to support efficient kNN search. + // Like most kNN algorithms, HNSW is an approximate method that sacrifices + // result accuracy for improved search speed. + // This means the results returned are not always the true k closest neighbors. + // + // The kNN search API supports restricting the search using a filter. + // The search will return the top k documents that also match the filter query. + // + // A kNN search response has the exact same structure as a search API response. + // However, certain sections have a meaning specific to kNN search: + // + // * The document `_score` is determined by the similarity between the query and + // document vector. + // * The `hits.total` object contains the total number of nearest neighbor + // candidates considered, which is `num_candidates * num_shards`. 
The + // `hits.total.relation` will always be `eq`, indicating an exact value. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search-api.html KnnSearch core_knn_search.NewKnnSearch - // Allows to get multiple documents in one request. + // Get multiple documents. + // + // Get multiple JSON documents by ID from one or more indices. + // If you specify an index in the request URI, you only need to specify the + // document IDs in the request body. + // To ensure fast responses, this multi get (mget) API responds with partial + // results if one or more shards fail. + // + // **Filter source fields** + // + // By default, the `_source` field is returned for every document (if stored). + // Use the `_source` and `_source_include` or `source_exclude` attributes to + // filter what fields are returned for a particular document. + // You can include the `_source`, `_source_includes`, and `_source_excludes` + // query parameters in the request URI to specify the defaults to use when there + // are no per-document instructions. + // + // **Get stored fields** + // + // Use the `stored_fields` attribute to specify the set of stored fields you + // want to retrieve. + // Any requested fields that are not stored are ignored. + // You can include the `stored_fields` query parameter in the request URI to + // specify the defaults to use when there are no per-document instructions. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html Mget core_mget.NewMget - // Allows to execute several search operations in one request. + // Run multiple searches. + // + // The format of the request is similar to the bulk API format and makes use of + // the newline delimited JSON (NDJSON) format. + // The structure is as follows: + // + // ``` + // header\n + // body\n + // header\n + // body\n + // ``` + // + // This structure is specifically optimized to reduce parsing if a specific + // search ends up redirected to another node. 
+ // + // IMPORTANT: The final line of data must end with a newline character `\n`. + // Each newline character may be preceded by a carriage return `\r`. + // When sending requests to this endpoint the `Content-Type` header should be + // set to `application/x-ndjson`. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html Msearch core_msearch.NewMsearch - // Runs multiple templated searches with a single request. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html + // Run multiple templated searches. + // + // Run multiple templated searches with a single request. + // If you are providing a text file or text input to `curl`, use the + // `--data-binary` flag instead of `-d` to preserve newlines. + // For example: + // + // ``` + // $ cat requests + // { "index": "my-index" } + // { "id": "my-search-template", "params": { "query_string": "hello world", + // "from": 0, "size": 10 }} + // { "index": "my-other-index" } + // { "id": "my-other-search-template", "params": { "query_type": "match_all" }} + // + // $ curl -H "Content-Type: application/x-ndjson" -XGET + // localhost:9200/_msearch/template --data-binary "@requests"; echo + // ``` + // https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-search-template.html MsearchTemplate core_msearch_template.NewMsearchTemplate - // Returns multiple termvectors in one request. + // Get multiple term vectors. + // + // Get multiple term vectors with a single request. + // You can specify existing documents by index and ID or provide artificial + // documents in the body of the request. + // You can specify the index in the request body or request URI. + // The response contains a `docs` array with all the fetched termvectors. + // Each element has the structure provided by the termvectors API. 
+ // + // **Artificial documents** + // + // You can also use `mtermvectors` to generate term vectors for artificial + // documents provided in the body of the request. + // The mapping used is determined by the specified `_index`. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html Mtermvectors core_mtermvectors.NewMtermvectors - // A search request by default executes against the most recent visible data of - // the target indices, + // Open a point in time. + // + // A search request by default runs against the most recent visible data of the + // target indices, // which is called point in time. Elasticsearch pit (point in time) is a // lightweight view into the // state of the data as it existed when initiated. In some cases, it’s preferred @@ -3263,68 +9300,726 @@ type API struct { // `search_after` requests, then the results of those requests might not be // consistent as changes happening // between searches are only visible to the more recent point in time. + // + // A point in time must be opened explicitly before being used in search + // requests. + // + // A subsequent search request with the `pit` parameter must not specify + // `index`, `routing`, or `preference` values as these parameters are copied + // from the point in time. + // + // Just like regular searches, you can use `from` and `size` to page through + // point in time search results, up to the first 10,000 hits. + // If you want to retrieve more hits, use PIT with `search_after`. + // + // IMPORTANT: The open point in time request and each subsequent search request + // can return different identifiers; always use the most recently received ID + // for the next search request. + // + // When a PIT that contains shard failures is used in a search request, the + // missing are always reported in the search response as a + // `NoShardAvailableActionException` exception. 
+ // To get rid of these exceptions, a new PIT needs to be created so that shards + // missing from the previous PIT can be handled, assuming they become available + // in the meantime. + // + // **Keeping point in time alive** + // + // The `keep_alive` parameter, which is passed to a open point in time request + // and search request, extends the time to live of the corresponding point in + // time. + // The value does not need to be long enough to process all data — it just needs + // to be long enough for the next request. + // + // Normally, the background merge process optimizes the index by merging + // together smaller segments to create new, bigger segments. + // Once the smaller segments are no longer needed they are deleted. + // However, open point-in-times prevent the old segments from being deleted + // since they are still in use. + // + // TIP: Keeping older segments alive means that more disk space and file handles + // are needed. + // Ensure that you have configured your nodes to have ample free file handles. + // + // Additionally, if a segment contains deleted or updated documents then the + // point in time must keep track of whether each document in the segment was + // live at the time of the initial search request. + // Ensure that your nodes have sufficient heap space if you have many open + // point-in-times on an index that is subject to ongoing deletes or updates. + // Note that a point-in-time doesn't prevent its associated indices from being + // deleted. + // You can check how many point-in-times (that is, search contexts) are open + // with the nodes stats API. // https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html OpenPointInTime core_open_point_in_time.NewOpenPointInTime // Ping the cluster. - // Returns whether the cluster is running. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html + // Get information about whether the cluster is running. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html Ping core_ping.NewPing // Create or update a script or search template. // Creates or updates a stored script or search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html + // https://www.elastic.co/guide/en/elasticsearch/reference/current/create-stored-script-api.html PutScript core_put_script.NewPutScript - // Enables you to evaluate the quality of ranked search results over a set of - // typical search queries. + // Evaluate ranked search results. + // + // Evaluate the quality of ranked search results over a set of typical search + // queries. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html RankEval core_rank_eval.NewRankEval // Reindex documents. - // Copies documents from a source to a destination. The source can be any - // existing index, alias, or data stream. The destination must differ from the - // source. For example, you cannot reindex a data stream into itself. + // + // Copy documents from a source to a destination. + // You can copy all documents to the destination index or reindex a subset of + // the documents. + // The source can be any existing index, alias, or data stream. + // The destination must differ from the source. + // For example, you cannot reindex a data stream into itself. + // + // IMPORTANT: Reindex requires `_source` to be enabled for all documents in the + // source. + // The destination should be configured as wanted before calling the reindex + // API. + // Reindex does not copy the settings from the source or its associated + // template. + // Mappings, shard counts, and replicas, for example, must be configured ahead + // of time. + // + // If the Elasticsearch security features are enabled, you must have the + // following security privileges: + // + // * The `read` index privilege for the source data stream, index, or alias. 
+ // * The `write` index privilege for the destination data stream, index, or + // index alias. + // * To automatically create a data stream or index with a reindex API request, + // you must have the `auto_configure`, `create_index`, or `manage` index + // privilege for the destination data stream, index, or alias. + // * If reindexing from a remote cluster, the `source.remote.user` must have the + // `monitor` cluster privilege and the `read` index privilege for the source + // data stream, index, or alias. + // + // If reindexing from a remote cluster, you must explicitly allow the remote + // host in the `reindex.remote.whitelist` setting. + // Automatic data stream creation requires a matching index template with data + // stream enabled. + // + // The `dest` element can be configured like the index API to control optimistic + // concurrency control. + // Omitting `version_type` or setting it to `internal` causes Elasticsearch to + // blindly dump documents into the destination, overwriting any that happen to + // have the same ID. + // + // Setting `version_type` to `external` causes Elasticsearch to preserve the + // `version` from the source, create any documents that are missing, and update + // any documents that have an older version in the destination than they do in + // the source. + // + // Setting `op_type` to `create` causes the reindex API to create only missing + // documents in the destination. + // All existing documents will cause a version conflict. + // + // IMPORTANT: Because data streams are append-only, any reindex request to a + // destination data stream must have an `op_type` of `create`. + // A reindex can only add new documents to a destination data stream. + // It cannot update existing documents in a destination data stream. + // + // By default, version conflicts abort the reindex process. + // To continue reindexing if there are conflicts, set the `conflicts` request + // body property to `proceed`. 
+ // In this case, the response includes a count of the version conflicts that
+ // were encountered.
+ // Note that the handling of other error types is unaffected by the `conflicts`
+ // property.
+ // Additionally, if you opt to count version conflicts, the operation could
+ // attempt to reindex more documents from the source than `max_docs` until it
+ // has successfully indexed `max_docs` documents into the target or it has gone
+ // through every document in the source query.
+ //
+ // NOTE: The reindex API makes no effort to handle ID collisions.
+ // The last document written will "win" but the order isn't usually predictable
+ // so it is not a good idea to rely on this behavior.
+ // Instead, make sure that IDs are unique by using a script.
+ //
+ // **Running reindex asynchronously**
+ //
+ // If the request contains `wait_for_completion=false`, Elasticsearch performs
+ // some preflight checks, launches the request, and returns a task you can use
+ // to cancel or get the status of the task.
+ // Elasticsearch creates a record of this task as a document at
+ // `_tasks/<task_id>`.
+ //
+ // **Reindex from multiple sources**
+ //
+ // If you have many sources to reindex it is generally better to reindex them
+ // one at a time rather than using a glob pattern to pick up multiple sources.
+ // That way you can resume the process if there are any errors by removing the
+ // partially completed source and starting over.
+ // It also makes parallelizing the process fairly simple: split the list of
+ // sources to reindex and run each list in parallel.
+ // + // For example, you can use a bash script like this: + // + // ``` + // for index in i1 i2 i3 i4 i5; do + // curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty + // -d'{ + // "source": { + // "index": "'$index'" + // }, + // "dest": { + // "index": "'$index'-reindexed" + // } + // }' + // done + // ``` + // + // **Throttling** + // + // Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, + // for example) to throttle the rate at which reindex issues batches of index + // operations. + // Requests are throttled by padding each batch with a wait time. + // To turn off throttling, set `requests_per_second` to `-1`. + // + // The throttling is done by waiting between batches so that the scroll that + // reindex uses internally can be given a timeout that takes into account the + // padding. + // The padding time is the difference between the batch size divided by the + // `requests_per_second` and the time spent writing. + // By default the batch size is `1000`, so if `requests_per_second` is set to + // `500`: + // + // ``` + // target_time = 1000 / 500 per second = 2 seconds + // wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds + // ``` + // + // Since the batch is issued as a single bulk request, large batch sizes cause + // Elasticsearch to create many requests and then wait for a while before + // starting the next set. + // This is "bursty" instead of "smooth". + // + // **Slicing** + // + // Reindex supports sliced scroll to parallelize the reindexing process. + // This parallelization can improve efficiency and provide a convenient way to + // break the request down into smaller parts. + // + // NOTE: Reindexing from remote clusters does not support manual or automatic + // slicing. + // + // You can slice a reindex request manually by providing a slice ID and total + // number of slices to each request. 
+ // You can also let reindex automatically parallelize by using sliced scroll to + // slice on `_id`. + // The `slices` parameter specifies the number of slices to use. + // + // Adding `slices` to the reindex request just automates the manual process, + // creating sub-requests which means it has some quirks: + // + // * You can see these requests in the tasks API. These sub-requests are "child" + // tasks of the task for the request with slices. + // * Fetching the status of the task for the request with `slices` only contains + // the status of completed slices. + // * These sub-requests are individually addressable for things like + // cancellation and rethrottling. + // * Rethrottling the request with `slices` will rethrottle the unfinished + // sub-request proportionally. + // * Canceling the request with `slices` will cancel each sub-request. + // * Due to the nature of `slices`, each sub-request won't get a perfectly even + // portion of the documents. All documents will be addressed, but some slices + // may be larger than others. Expect larger slices to have a more even + // distribution. + // * Parameters like `requests_per_second` and `max_docs` on a request with + // `slices` are distributed proportionally to each sub-request. Combine that + // with the previous point about distribution being uneven and you should + // conclude that using `max_docs` with `slices` might not result in exactly + // `max_docs` documents being reindexed. + // * Each sub-request gets a slightly different snapshot of the source, though + // these are all taken at approximately the same time. + // + // If slicing automatically, setting `slices` to `auto` will choose a reasonable + // number for most indices. + // If slicing manually or otherwise tuning automatic slicing, use the following + // guidelines. + // + // Query performance is most efficient when the number of slices is equal to the + // number of shards in the index. 
+ // If that number is large (for example, `500`), choose a lower number as too + // many slices will hurt performance. + // Setting slices higher than the number of shards generally does not improve + // efficiency and adds overhead. + // + // Indexing performance scales linearly across available resources with the + // number of slices. + // + // Whether query or indexing performance dominates the runtime depends on the + // documents being reindexed and cluster resources. + // + // **Modify documents during reindexing** + // + // Like `_update_by_query`, reindex operations support a script that modifies + // the document. + // Unlike `_update_by_query`, the script is allowed to modify the document's + // metadata. + // + // Just as in `_update_by_query`, you can set `ctx.op` to change the operation + // that is run on the destination. + // For example, set `ctx.op` to `noop` if your script decides that the document + // doesn’t have to be indexed in the destination. This "no operation" will be + // reported in the `noop` counter in the response body. + // Set `ctx.op` to `delete` if your script decides that the document must be + // deleted from the destination. + // The deletion will be reported in the `deleted` counter in the response body. + // Setting `ctx.op` to anything else will return an error, as will setting any + // other field in `ctx`. + // + // Think of the possibilities! Just be careful; you are able to change: + // + // * `_id` + // * `_index` + // * `_version` + // * `_routing` + // + // Setting `_version` to `null` or clearing it from the `ctx` map is just like + // not sending the version in an indexing request. + // It will cause the document to be overwritten in the destination regardless of + // the version on the target or the version type you use in the reindex API. + // + // **Reindex from remote** + // + // Reindex supports reindexing from a remote Elasticsearch cluster. 
+ // The `host` parameter must contain a scheme, host, port, and optional path. + // The `username` and `password` parameters are optional and when they are + // present the reindex operation will connect to the remote Elasticsearch node + // using basic authentication. + // Be sure to use HTTPS when using basic authentication or the password will be + // sent in plain text. + // There are a range of settings available to configure the behavior of the + // HTTPS connection. + // + // When using Elastic Cloud, it is also possible to authenticate against the + // remote cluster through the use of a valid API key. + // Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` + // setting. + // It can be set to a comma delimited list of allowed remote host and port + // combinations. + // Scheme is ignored; only the host and port are used. + // For example: + // + // ``` + // reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, + // localhost:*] + // ``` + // + // The list of allowed hosts must be configured on any nodes that will + // coordinate the reindex. + // This feature should work with remote clusters of any version of + // Elasticsearch. + // This should enable you to upgrade from any version of Elasticsearch to the + // current version by reindexing from a cluster of the old version. + // + // WARNING: Elasticsearch does not support forward compatibility across major + // versions. + // For example, you cannot reindex from a 7.x cluster into a 6.x cluster. + // + // To enable queries sent to older versions of Elasticsearch, the `query` + // parameter is sent directly to the remote host without validation or + // modification. + // + // NOTE: Reindexing from remote clusters does not support manual or automatic + // slicing. + // + // Reindexing from a remote server uses an on-heap buffer that defaults to a + // maximum size of 100mb. 
+ // If the remote index includes very large documents you'll need to use a + // smaller batch size. + // It is also possible to set the socket read timeout on the remote connection + // with the `socket_timeout` field and the connection timeout with the + // `connect_timeout` field. + // Both default to 30 seconds. + // + // **Configuring SSL parameters** + // + // Reindex from remote supports configurable SSL settings. + // These must be specified in the `elasticsearch.yml` file, with the exception + // of the secure settings, which you add in the Elasticsearch keystore. + // It is not possible to configure SSL in the body of the reindex request. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html Reindex core_reindex.NewReindex - // Copies documents from a source to a destination. + // Throttle a reindex operation. + // + // Change the number of requests per second for a particular reindex operation. + // For example: + // + // ``` + // POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 + // ``` + // + // Rethrottling that speeds up the query takes effect immediately. + // Rethrottling that slows down the query will take effect after completing the + // current batch. + // This behavior prevents scroll timeouts. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html ReindexRethrottle core_reindex_rethrottle.NewReindexRethrottle - // Renders a search template as a search request body. + // Render a search template. + // + // Render a search template as a search request body. // https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-template-api.html RenderSearchTemplate core_render_search_template.NewRenderSearchTemplate // Run a script. + // // Runs a script and returns a result. + // Use this API to build and test scripts, such as when defining a script for a + // runtime field. 
+ // This API requires very few dependencies and is especially useful if you don't + // have permissions to write documents on a cluster. + // + // The API uses several _contexts_, which control how scripts are run, what + // variables are available at runtime, and what the return type is. + // + // Each context requires a script, but additional parameters depend on the + // context you're using for that script. // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html ScriptsPainlessExecute core_scripts_painless_execute.NewScriptsPainlessExecute - // Allows to retrieve a large numbers of results from a single search request. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html#request-body-search-scroll + // Run a scrolling search. + // + // IMPORTANT: The scroll API is no longer recommend for deep pagination. If you + // need to preserve the index state while paging through more than 10,000 hits, + // use the `search_after` parameter with a point in time (PIT). + // + // The scroll API gets large sets of results from a single scrolling search + // request. + // To get the necessary scroll ID, submit a search API request that includes an + // argument for the `scroll` query parameter. + // The `scroll` parameter indicates how long Elasticsearch should retain the + // search context for the request. + // The search response returns a scroll ID in the `_scroll_id` response body + // parameter. + // You can then use the scroll ID with the scroll API to retrieve the next batch + // of results for the request. + // If the Elasticsearch security features are enabled, the access to the results + // of a specific scroll ID is restricted to the user or API key that submitted + // the search. + // + // You can also use the scroll API to specify a new scroll parameter that + // extends or shortens the retention period for the search context. 
+ // + // IMPORTANT: Results from a scrolling search reflect the state of the index at + // the time of the initial search request. Subsequent indexing or document + // changes only affect later search and scroll requests. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/scroll-api.html Scroll core_scroll.NewScroll - // Returns search hits that match the query defined in the request. + // Run a search. + // + // Get search hits that match the query defined in the request. // You can provide search queries using the `q` query string parameter or the // request body. // If both are specified, only the query parameter is used. + // + // If the Elasticsearch security features are enabled, you must have the read + // index privilege for the target data stream, index, or alias. For + // cross-cluster search, refer to the documentation about configuring CCS + // privileges. + // To search a point in time (PIT) for an alias, you must have the `read` index + // privilege for the alias's data streams or indices. + // + // **Search slicing** + // + // When paging through a large number of documents, it can be helpful to split + // the search into multiple slices to consume them independently with the + // `slice` and `pit` properties. + // By default the splitting is done first on the shards, then locally on each + // shard. + // The local splitting partitions the shard into contiguous ranges based on + // Lucene document IDs. + // + // For instance if the number of shards is equal to 2 and you request 4 slices, + // the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are + // assigned to the second shard. + // + // IMPORTANT: The same point-in-time ID should be used for all slices. + // If different PIT IDs are used, slices can overlap and miss documents. + // This situation can occur because the splitting criterion is based on Lucene + // document IDs, which are not stable across changes to the index. 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html Search core_search.NewSearch // Search a vector tile. - // Searches a vector tile for geospatial values. + // + // Search a vector tile for geospatial values. + // Before using this API, you should be familiar with the Mapbox vector tile + // specification. + // The API returns results as a binary mapbox vector tile. + // + // Internally, Elasticsearch translates a vector tile search API request into a + // search containing: + // + // * A `geo_bounding_box` query on the ``. The query uses the + // `//` tile as a bounding box. + // * A `geotile_grid` or `geohex_grid` aggregation on the ``. The + // `grid_agg` parameter determines the aggregation type. The aggregation uses + // the `//` tile as a bounding box. + // * Optionally, a `geo_bounds` aggregation on the ``. The search only + // includes this aggregation if the `exact_bounds` parameter is `true`. + // * If the optional parameter `with_labels` is `true`, the internal search will + // include a dynamic runtime field that calls the `getLabelPosition` function of + // the geometry doc value. This enables the generation of new point features + // containing suggested geometry labels, so that, for example, multi-polygons + // will have only one label. 
+ // + // For example, Elasticsearch may translate a vector tile search API request + // with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of + // `true` into the following search + // + // ``` + // GET my-index/_search + // { + // "size": 10000, + // "query": { + // "geo_bounding_box": { + // "my-geo-field": { + // "top_left": { + // "lat": -40.979898069620134, + // "lon": -45 + // }, + // "bottom_right": { + // "lat": -66.51326044311186, + // "lon": 0 + // } + // } + // } + // }, + // "aggregations": { + // "grid": { + // "geotile_grid": { + // "field": "my-geo-field", + // "precision": 11, + // "size": 65536, + // "bounds": { + // "top_left": { + // "lat": -40.979898069620134, + // "lon": -45 + // }, + // "bottom_right": { + // "lat": -66.51326044311186, + // "lon": 0 + // } + // } + // } + // }, + // "bounds": { + // "geo_bounds": { + // "field": "my-geo-field", + // "wrap_longitude": false + // } + // } + // } + // } + // ``` + // + // The API returns results as a binary Mapbox vector tile. + // Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the + // tile contains three layers: + // + // * A `hits` layer containing a feature for each `` value matching the + // `geo_bounding_box` query. + // * An `aggs` layer containing a feature for each cell of the `geotile_grid` or + // `geohex_grid`. The layer only contains features for cells with matching data. + // * A meta layer containing: + // * A feature containing a bounding box. By default, this is the bounding box + // of the tile. + // * Value ranges for any sub-aggregations on the `geotile_grid` or + // `geohex_grid`. + // * Metadata for the search. + // + // The API only returns features that can display at its zoom level. + // For example, if a polygon feature has no area at its zoom level, the API + // omits it. + // The API returns errors as UTF-8 encoded JSON. 
+ // + // IMPORTANT: You can specify several options for this API as either a query + // parameter or request body parameter. + // If you specify both parameters, the query parameter takes precedence. + // + // **Grid precision for geotile** + // + // For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles + // for lower zoom levels. + // `grid_precision` represents the additional zoom levels available through + // these cells. The final precision is computed by as follows: ` + + // grid_precision`. + // For example, if `` is 7 and `grid_precision` is 8, then the + // `geotile_grid` aggregation will use a precision of 15. + // The maximum final precision is 29. + // The `grid_precision` also determines the number of cells for the grid as + // follows: `(2^grid_precision) x (2^grid_precision)`. + // For example, a value of 8 divides the tile into a grid of 256 x 256 cells. + // The `aggs` layer only contains features for cells with matching data. + // + // **Grid precision for geohex** + // + // For a `grid_agg` of `geohex`, Elasticsearch uses `` and + // `grid_precision` to calculate a final precision as follows: ` + + // grid_precision`. + // + // This precision determines the H3 resolution of the hexagonal cells produced + // by the `geohex` aggregation. + // The following table maps the H3 resolution for each precision. + // For example, if `` is 3 and `grid_precision` is 3, the precision is 6. + // At a precision of 6, hexagonal cells have an H3 resolution of 2. + // If `` is 3 and `grid_precision` is 4, the precision is 7. + // At a precision of 7, hexagonal cells have an H3 resolution of 3. 
+ // + // | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | + // | --------- | ---------------- | ------------- | ----------------| ----- | + // | 1 | 4 | 0 | 122 | 30.5 | + // | 2 | 16 | 0 | 122 | 7.625 | + // | 3 | 64 | 1 | 842 | 13.15625 | + // | 4 | 256 | 1 | 842 | 3.2890625 | + // | 5 | 1024 | 2 | 5882 | 5.744140625 | + // | 6 | 4096 | 2 | 5882 | 1.436035156 | + // | 7 | 16384 | 3 | 41162 | 2.512329102 | + // | 8 | 65536 | 3 | 41162 | 0.6280822754 | + // | 9 | 262144 | 4 | 288122 | 1.099098206 | + // | 10 | 1048576 | 4 | 288122 | 0.2747745514 | + // | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | + // | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | + // | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | + // | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | + // | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | + // | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | + // | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | + // | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | + // | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | + // | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | + // | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | + // | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | + // | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | + // | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | + // | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | + // | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | + // | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | + // | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | + // | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | + // + // Hexagonal cells don't align perfectly on a vector tile. + // Some cells may intersect more than one vector tile. 
+ // To compute the H3 resolution for each precision, Elasticsearch compares the + // average density of hexagonal bins at each resolution with the average density + // of tile bins at each zoom level. + // Elasticsearch uses the H3 resolution that is closest to the corresponding + // geotile density. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-vector-tile-api.html SearchMvt core_search_mvt.NewSearchMvt - // Returns information about the indices and shards that a search request would - // be executed against. + // Get the search shards. + // + // Get the indices and shards that a search request would be run against. + // This information can be useful for working out issues or planning + // optimizations with routing and shard preferences. + // When filtered aliases are used, the filter is returned as part of the + // `indices` section. + // + // If the Elasticsearch security features are enabled, you must have the + // `view_index_metadata` or `manage` index privilege for the target data stream, + // index, or alias. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html SearchShards core_search_shards.NewSearchShards - // Runs a search with a search template. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html + // Run a search with a search template. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template-api.html SearchTemplate core_search_template.NewSearchTemplate - // The terms enum API can be used to discover terms in the index that begin - // with the provided string. It is designed for low-latency look-ups used in - // auto-complete scenarios. + // Get terms in an index. + // + // Discover terms that match a partial string in an index. + // This API is designed for low-latency look-ups used in auto-complete + // scenarios. + // + // > info + // > The terms enum API may return terms from deleted documents. 
Deleted + // documents are initially only marked as deleted. It is not until their + // segments are merged that documents are actually deleted. Until that happens, + // the terms enum API will return terms from these documents. // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-terms-enum.html TermsEnum core_terms_enum.NewTermsEnum // Get term vector information. - // Returns information and statistics about terms in the fields of a particular + // + // Get information and statistics about terms in the fields of a particular // document. + // + // You can retrieve term vectors for documents stored in the index or for + // artificial documents passed in the body of the request. + // You can specify the fields you are interested in through the `fields` + // parameter or by adding the fields to the request body. + // For example: + // + // ``` + // GET /my-index-000001/_termvectors/1?fields=message + // ``` + // + // Fields can be specified using wildcards, similar to the multi match query. + // + // Term vectors are real-time by default, not near real-time. + // This can be changed by setting `realtime` parameter to `false`. + // + // You can request three types of values: _term information_, _term statistics_, + // and _field statistics_. + // By default, all term information and field statistics are returned for all + // fields but term statistics are excluded. + // + // **Term information** + // + // * term frequency in the field (always returned) + // * term positions (`positions: true`) + // * start and end offsets (`offsets: true`) + // * term payloads (`payloads: true`), as base64 encoded bytes + // + // If the requested information wasn't stored in the index, it will be computed + // on the fly if possible. + // Additionally, term vectors could be computed for documents not even existing + // in the index, but instead provided by the user. + // + // > warn + // > Start and end offsets assume UTF-16 encoding is being used. 
If you want to + // use these offsets in order to get the original text that produced this token, + // you should make sure that the string you are taking a sub-string of is also + // encoded using UTF-16. + // + // **Behaviour** + // + // The term and field statistics are not accurate. + // Deleted documents are not taken into account. + // The information is only retrieved for the shard the requested document + // resides in. + // The term and field statistics are therefore only useful as relative measures + // whereas the absolute numbers have no meaning in this context. + // By default, when requesting term vectors of artificial documents, a shard to + // get the statistics from is randomly selected. + // Use `routing` only to hit a particular shard. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-termvectors.html Termvectors core_termvectors.NewTermvectors // Update a document. - // Updates a document by running a script or passing a partial document. + // + // Update a document by running a script or passing a partial document. + // + // If the Elasticsearch security features are enabled, you must have the `index` + // or `write` index privilege for the target index or index alias. + // + // The script can update, delete, or skip modifying the document. + // The API also supports passing a partial document, which is merged into the + // existing document. + // To fully replace an existing document, use the index API. + // This operation: + // + // * Gets the document (collocated with the shard) from the index. + // * Runs the specified script. + // * Indexes the result. + // + // The document must still be reindexed, but using this API removes some network + // roundtrips and reduces chances of version conflicts between the GET and the + // index operation. + // + // The `_source` field must be enabled to use this API. 
+ // In addition to `_source`, you can access the following variables through the + // `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the + // current timestamp). // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html Update core_update.NewUpdate // Update documents. @@ -3332,11 +10027,148 @@ type API struct { // If no query is specified, performs an update on every document in the data // stream or index without modifying the source, which is useful for picking up // mapping changes. + // + // If the Elasticsearch security features are enabled, you must have the + // following index privileges for the target data stream, index, or alias: + // + // * `read` + // * `index` or `write` + // + // You can specify the query criteria in the request URI or the request body + // using the same syntax as the search API. + // + // When you submit an update by query request, Elasticsearch gets a snapshot of + // the data stream or index when it begins processing the request and updates + // matching documents using internal versioning. + // When the versions match, the document is updated and the version number is + // incremented. + // If a document changes between the time that the snapshot is taken and the + // update operation is processed, it results in a version conflict and the + // operation fails. + // You can opt to count version conflicts instead of halting and returning by + // setting `conflicts` to `proceed`. + // Note that if you opt to count version conflicts, the operation could attempt + // to update more documents from the source than `max_docs` until it has + // successfully updated `max_docs` documents or it has gone through every + // document in the source query. + // + // NOTE: Documents with a version equal to 0 cannot be updated using update by + // query because internal versioning does not support 0 as a valid version + // number. 
+ // + // While processing an update by query request, Elasticsearch performs multiple + // search requests sequentially to find all of the matching documents. + // A bulk update request is performed for each batch of matching documents. + // Any query or update failures cause the update by query request to fail and + // the failures are shown in the response. + // Any update requests that completed successfully still stick, they are not + // rolled back. + // + // **Throttling update requests** + // + // To control the rate at which update by query issues batches of update + // operations, you can set `requests_per_second` to any positive decimal number. + // This pads each batch with a wait time to throttle the rate. + // Set `requests_per_second` to `-1` to turn off throttling. + // + // Throttling uses a wait time between batches so that the internal scroll + // requests can be given a timeout that takes the request padding into account. + // The padding time is the difference between the batch size divided by the + // `requests_per_second` and the time spent writing. + // By default the batch size is 1000, so if `requests_per_second` is set to + // `500`: + // + // ``` + // target_time = 1000 / 500 per second = 2 seconds + // wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds + // ``` + // + // Since the batch is issued as a single _bulk request, large batch sizes cause + // Elasticsearch to create many requests and wait before starting the next set. + // This is "bursty" instead of "smooth". + // + // **Slicing** + // + // Update by query supports sliced scroll to parallelize the update process. + // This can improve efficiency and provide a convenient way to break the request + // down into smaller parts. + // + // Setting `slices` to `auto` chooses a reasonable number for most data streams + // and indices. + // This setting will use one slice per shard, up to a certain limit. 
+ // If there are multiple source data streams or indices, it will choose the + // number of slices based on the index or backing index with the smallest number + // of shards. + // + // Adding `slices` to `_update_by_query` just automates the manual process of + // creating sub-requests, which means it has some quirks: + // + // * You can see these requests in the tasks APIs. These sub-requests are + // "child" tasks of the task for the request with slices. + // * Fetching the status of the task for the request with `slices` only contains + // the status of completed slices. + // * These sub-requests are individually addressable for things like + // cancellation and rethrottling. + // * Rethrottling the request with `slices` will rethrottle the unfinished + // sub-request proportionally. + // * Canceling the request with slices will cancel each sub-request. + // * Due to the nature of slices each sub-request won't get a perfectly even + // portion of the documents. All documents will be addressed, but some slices + // may be larger than others. Expect larger slices to have a more even + // distribution. + // * Parameters like `requests_per_second` and `max_docs` on a request with + // slices are distributed proportionally to each sub-request. Combine that with + // the point above about distribution being uneven and you should conclude that + // using `max_docs` with `slices` might not result in exactly `max_docs` + // documents being updated. + // * Each sub-request gets a slightly different snapshot of the source data + // stream or index though these are all taken at approximately the same time. + // + // If you're slicing manually or otherwise tuning automatic slicing, keep in + // mind that: + // + // * Query performance is most efficient when the number of slices is equal to + // the number of shards in the index or backing index. If that number is large + // (for example, 500), choose a lower number as too many slices hurts + // performance. 
Setting slices higher than the number of shards generally does + // not improve efficiency and adds overhead. + // * Update performance scales linearly across available resources with the + // number of slices. + // + // Whether query or update performance dominates the runtime depends on the + // documents being reindexed and cluster resources. + // + // **Update the document source** + // + // Update by query supports scripts to update the document source. + // As with the update API, you can set `ctx.op` to change the operation that is + // performed. + // + // Set `ctx.op = "noop"` if your script decides that it doesn't have to make any + // changes. + // The update by query operation skips updating the document and increments the + // `noop` counter. + // + // Set `ctx.op = "delete"` if your script decides that the document should be + // deleted. + // The update by query operation deletes the document and increments the + // `deleted` counter. + // + // Update by query supports only `index`, `noop`, and `delete`. + // Setting `ctx.op` to anything else is an error. + // Setting any other field in `ctx` is an error. + // This API enables you to only modify the source of matching documents; you + // cannot move them. // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html UpdateByQuery core_update_by_query.NewUpdateByQuery - // Changes the number of requests per second for a particular Update By Query + // Throttle an update by query operation. + // + // Change the number of requests per second for a particular update by query // operation. - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html + // Rethrottling that speeds up the query takes effect immediately but + // rethrottling that slows down the query takes effect after completing the + // current batch to prevent scroll timeouts. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html#docs-update-by-query-rethrottle UpdateByQueryRethrottle core_update_by_query_rethrottle.NewUpdateByQueryRethrottle } @@ -3441,14 +10273,19 @@ func New(tp elastictransport.Interface) *API { Put: connector_put.NewPutFunc(tp), SecretPost: connector_secret_post.NewSecretPostFunc(tp), SyncJobCancel: connector_sync_job_cancel.NewSyncJobCancelFunc(tp), + SyncJobCheckIn: connector_sync_job_check_in.NewSyncJobCheckInFunc(tp), + SyncJobClaim: connector_sync_job_claim.NewSyncJobClaimFunc(tp), SyncJobDelete: connector_sync_job_delete.NewSyncJobDeleteFunc(tp), + SyncJobError: connector_sync_job_error.NewSyncJobErrorFunc(tp), SyncJobGet: connector_sync_job_get.NewSyncJobGetFunc(tp), SyncJobList: connector_sync_job_list.NewSyncJobListFunc(tp), SyncJobPost: connector_sync_job_post.NewSyncJobPostFunc(tp), + SyncJobUpdateStats: connector_sync_job_update_stats.NewSyncJobUpdateStatsFunc(tp), UpdateActiveFiltering: connector_update_active_filtering.NewUpdateActiveFilteringFunc(tp), UpdateApiKeyId: connector_update_api_key_id.NewUpdateApiKeyIdFunc(tp), UpdateConfiguration: connector_update_configuration.NewUpdateConfigurationFunc(tp), UpdateError: connector_update_error.NewUpdateErrorFunc(tp), + UpdateFeatures: connector_update_features.NewUpdateFeaturesFunc(tp), UpdateFiltering: connector_update_filtering.NewUpdateFilteringFunc(tp), UpdateFilteringValidation: connector_update_filtering_validation.NewUpdateFilteringValidationFunc(tp), UpdateIndexName: connector_update_index_name.NewUpdateIndexNameFunc(tp), @@ -3534,8 +10371,11 @@ func New(tp elastictransport.Interface) *API { // Esql Esql: Esql{ - AsyncQuery: esql_async_query.NewAsyncQueryFunc(tp), - Query: esql_query.NewQueryFunc(tp), + AsyncQuery: esql_async_query.NewAsyncQueryFunc(tp), + AsyncQueryDelete: esql_async_query_delete.NewAsyncQueryDeleteFunc(tp), + AsyncQueryGet: esql_async_query_get.NewAsyncQueryGetFunc(tp), + AsyncQueryStop: 
esql_async_query_stop.NewAsyncQueryStopFunc(tp), + Query: esql_query.NewQueryFunc(tp), }, // Features @@ -3574,83 +10414,102 @@ func New(tp elastictransport.Interface) *API { // Indices Indices: Indices{ - AddBlock: indices_add_block.NewAddBlockFunc(tp), - Analyze: indices_analyze.NewAnalyzeFunc(tp), - ClearCache: indices_clear_cache.NewClearCacheFunc(tp), - Clone: indices_clone.NewCloneFunc(tp), - Close: indices_close.NewCloseFunc(tp), - Create: indices_create.NewCreateFunc(tp), - CreateDataStream: indices_create_data_stream.NewCreateDataStreamFunc(tp), - DataStreamsStats: indices_data_streams_stats.NewDataStreamsStatsFunc(tp), - Delete: indices_delete.NewDeleteFunc(tp), - DeleteAlias: indices_delete_alias.NewDeleteAliasFunc(tp), - DeleteDataLifecycle: indices_delete_data_lifecycle.NewDeleteDataLifecycleFunc(tp), - DeleteDataStream: indices_delete_data_stream.NewDeleteDataStreamFunc(tp), - DeleteIndexTemplate: indices_delete_index_template.NewDeleteIndexTemplateFunc(tp), - DeleteTemplate: indices_delete_template.NewDeleteTemplateFunc(tp), - DiskUsage: indices_disk_usage.NewDiskUsageFunc(tp), - Downsample: indices_downsample.NewDownsampleFunc(tp), - Exists: indices_exists.NewExistsFunc(tp), - ExistsAlias: indices_exists_alias.NewExistsAliasFunc(tp), - ExistsIndexTemplate: indices_exists_index_template.NewExistsIndexTemplateFunc(tp), - ExistsTemplate: indices_exists_template.NewExistsTemplateFunc(tp), - ExplainDataLifecycle: indices_explain_data_lifecycle.NewExplainDataLifecycleFunc(tp), - FieldUsageStats: indices_field_usage_stats.NewFieldUsageStatsFunc(tp), - Flush: indices_flush.NewFlushFunc(tp), - Forcemerge: indices_forcemerge.NewForcemergeFunc(tp), - Get: indices_get.NewGetFunc(tp), - GetAlias: indices_get_alias.NewGetAliasFunc(tp), - GetDataLifecycle: indices_get_data_lifecycle.NewGetDataLifecycleFunc(tp), - GetDataStream: indices_get_data_stream.NewGetDataStreamFunc(tp), - GetFieldMapping: indices_get_field_mapping.NewGetFieldMappingFunc(tp), - 
GetIndexTemplate: indices_get_index_template.NewGetIndexTemplateFunc(tp), - GetMapping: indices_get_mapping.NewGetMappingFunc(tp), - GetSettings: indices_get_settings.NewGetSettingsFunc(tp), - GetTemplate: indices_get_template.NewGetTemplateFunc(tp), - MigrateToDataStream: indices_migrate_to_data_stream.NewMigrateToDataStreamFunc(tp), - ModifyDataStream: indices_modify_data_stream.NewModifyDataStreamFunc(tp), - Open: indices_open.NewOpenFunc(tp), - PromoteDataStream: indices_promote_data_stream.NewPromoteDataStreamFunc(tp), - PutAlias: indices_put_alias.NewPutAliasFunc(tp), - PutDataLifecycle: indices_put_data_lifecycle.NewPutDataLifecycleFunc(tp), - PutIndexTemplate: indices_put_index_template.NewPutIndexTemplateFunc(tp), - PutMapping: indices_put_mapping.NewPutMappingFunc(tp), - PutSettings: indices_put_settings.NewPutSettingsFunc(tp), - PutTemplate: indices_put_template.NewPutTemplateFunc(tp), - Recovery: indices_recovery.NewRecoveryFunc(tp), - Refresh: indices_refresh.NewRefreshFunc(tp), - ReloadSearchAnalyzers: indices_reload_search_analyzers.NewReloadSearchAnalyzersFunc(tp), - ResolveCluster: indices_resolve_cluster.NewResolveClusterFunc(tp), - ResolveIndex: indices_resolve_index.NewResolveIndexFunc(tp), - Rollover: indices_rollover.NewRolloverFunc(tp), - Segments: indices_segments.NewSegmentsFunc(tp), - ShardStores: indices_shard_stores.NewShardStoresFunc(tp), - Shrink: indices_shrink.NewShrinkFunc(tp), - SimulateIndexTemplate: indices_simulate_index_template.NewSimulateIndexTemplateFunc(tp), - SimulateTemplate: indices_simulate_template.NewSimulateTemplateFunc(tp), - Split: indices_split.NewSplitFunc(tp), - Stats: indices_stats.NewStatsFunc(tp), - Unfreeze: indices_unfreeze.NewUnfreezeFunc(tp), - UpdateAliases: indices_update_aliases.NewUpdateAliasesFunc(tp), - ValidateQuery: indices_validate_query.NewValidateQueryFunc(tp), + AddBlock: indices_add_block.NewAddBlockFunc(tp), + Analyze: indices_analyze.NewAnalyzeFunc(tp), + CancelMigrateReindex: 
indices_cancel_migrate_reindex.NewCancelMigrateReindexFunc(tp), + ClearCache: indices_clear_cache.NewClearCacheFunc(tp), + Clone: indices_clone.NewCloneFunc(tp), + Close: indices_close.NewCloseFunc(tp), + Create: indices_create.NewCreateFunc(tp), + CreateDataStream: indices_create_data_stream.NewCreateDataStreamFunc(tp), + CreateFrom: indices_create_from.NewCreateFromFunc(tp), + DataStreamsStats: indices_data_streams_stats.NewDataStreamsStatsFunc(tp), + Delete: indices_delete.NewDeleteFunc(tp), + DeleteAlias: indices_delete_alias.NewDeleteAliasFunc(tp), + DeleteDataLifecycle: indices_delete_data_lifecycle.NewDeleteDataLifecycleFunc(tp), + DeleteDataStream: indices_delete_data_stream.NewDeleteDataStreamFunc(tp), + DeleteIndexTemplate: indices_delete_index_template.NewDeleteIndexTemplateFunc(tp), + DeleteTemplate: indices_delete_template.NewDeleteTemplateFunc(tp), + DiskUsage: indices_disk_usage.NewDiskUsageFunc(tp), + Downsample: indices_downsample.NewDownsampleFunc(tp), + Exists: indices_exists.NewExistsFunc(tp), + ExistsAlias: indices_exists_alias.NewExistsAliasFunc(tp), + ExistsIndexTemplate: indices_exists_index_template.NewExistsIndexTemplateFunc(tp), + ExistsTemplate: indices_exists_template.NewExistsTemplateFunc(tp), + ExplainDataLifecycle: indices_explain_data_lifecycle.NewExplainDataLifecycleFunc(tp), + FieldUsageStats: indices_field_usage_stats.NewFieldUsageStatsFunc(tp), + Flush: indices_flush.NewFlushFunc(tp), + Forcemerge: indices_forcemerge.NewForcemergeFunc(tp), + Get: indices_get.NewGetFunc(tp), + GetAlias: indices_get_alias.NewGetAliasFunc(tp), + GetDataLifecycle: indices_get_data_lifecycle.NewGetDataLifecycleFunc(tp), + GetDataLifecycleStats: indices_get_data_lifecycle_stats.NewGetDataLifecycleStatsFunc(tp), + GetDataStream: indices_get_data_stream.NewGetDataStreamFunc(tp), + GetFieldMapping: indices_get_field_mapping.NewGetFieldMappingFunc(tp), + GetIndexTemplate: indices_get_index_template.NewGetIndexTemplateFunc(tp), + GetMapping: 
indices_get_mapping.NewGetMappingFunc(tp), + GetMigrateReindexStatus: indices_get_migrate_reindex_status.NewGetMigrateReindexStatusFunc(tp), + GetSettings: indices_get_settings.NewGetSettingsFunc(tp), + GetTemplate: indices_get_template.NewGetTemplateFunc(tp), + MigrateReindex: indices_migrate_reindex.NewMigrateReindexFunc(tp), + MigrateToDataStream: indices_migrate_to_data_stream.NewMigrateToDataStreamFunc(tp), + ModifyDataStream: indices_modify_data_stream.NewModifyDataStreamFunc(tp), + Open: indices_open.NewOpenFunc(tp), + PromoteDataStream: indices_promote_data_stream.NewPromoteDataStreamFunc(tp), + PutAlias: indices_put_alias.NewPutAliasFunc(tp), + PutDataLifecycle: indices_put_data_lifecycle.NewPutDataLifecycleFunc(tp), + PutIndexTemplate: indices_put_index_template.NewPutIndexTemplateFunc(tp), + PutMapping: indices_put_mapping.NewPutMappingFunc(tp), + PutSettings: indices_put_settings.NewPutSettingsFunc(tp), + PutTemplate: indices_put_template.NewPutTemplateFunc(tp), + Recovery: indices_recovery.NewRecoveryFunc(tp), + Refresh: indices_refresh.NewRefreshFunc(tp), + ReloadSearchAnalyzers: indices_reload_search_analyzers.NewReloadSearchAnalyzersFunc(tp), + ResolveCluster: indices_resolve_cluster.NewResolveClusterFunc(tp), + ResolveIndex: indices_resolve_index.NewResolveIndexFunc(tp), + Rollover: indices_rollover.NewRolloverFunc(tp), + Segments: indices_segments.NewSegmentsFunc(tp), + ShardStores: indices_shard_stores.NewShardStoresFunc(tp), + Shrink: indices_shrink.NewShrinkFunc(tp), + SimulateIndexTemplate: indices_simulate_index_template.NewSimulateIndexTemplateFunc(tp), + SimulateTemplate: indices_simulate_template.NewSimulateTemplateFunc(tp), + Split: indices_split.NewSplitFunc(tp), + Stats: indices_stats.NewStatsFunc(tp), + Unfreeze: indices_unfreeze.NewUnfreezeFunc(tp), + UpdateAliases: indices_update_aliases.NewUpdateAliasesFunc(tp), + ValidateQuery: indices_validate_query.NewValidateQueryFunc(tp), }, // Inference Inference: Inference{ - Delete: 
inference_delete.NewDeleteFunc(tp), - Get: inference_get.NewGetFunc(tp), - Inference: inference_inference.NewInferenceFunc(tp), - Put: inference_put.NewPutFunc(tp), + ChatCompletionUnified: inference_chat_completion_unified.NewChatCompletionUnifiedFunc(tp), + Completion: inference_completion.NewCompletionFunc(tp), + Delete: inference_delete.NewDeleteFunc(tp), + Get: inference_get.NewGetFunc(tp), + Put: inference_put.NewPutFunc(tp), + PutOpenai: inference_put_openai.NewPutOpenaiFunc(tp), + PutWatsonx: inference_put_watsonx.NewPutWatsonxFunc(tp), + Rerank: inference_rerank.NewRerankFunc(tp), + SparseEmbedding: inference_sparse_embedding.NewSparseEmbeddingFunc(tp), + StreamCompletion: inference_stream_completion.NewStreamCompletionFunc(tp), + TextEmbedding: inference_text_embedding.NewTextEmbeddingFunc(tp), + Update: inference_update.NewUpdateFunc(tp), }, // Ingest Ingest: Ingest{ - DeletePipeline: ingest_delete_pipeline.NewDeletePipelineFunc(tp), - GeoIpStats: ingest_geo_ip_stats.NewGeoIpStatsFunc(tp), - GetPipeline: ingest_get_pipeline.NewGetPipelineFunc(tp), - ProcessorGrok: ingest_processor_grok.NewProcessorGrokFunc(tp), - PutPipeline: ingest_put_pipeline.NewPutPipelineFunc(tp), - Simulate: ingest_simulate.NewSimulateFunc(tp), + DeleteGeoipDatabase: ingest_delete_geoip_database.NewDeleteGeoipDatabaseFunc(tp), + DeleteIpLocationDatabase: ingest_delete_ip_location_database.NewDeleteIpLocationDatabaseFunc(tp), + DeletePipeline: ingest_delete_pipeline.NewDeletePipelineFunc(tp), + GeoIpStats: ingest_geo_ip_stats.NewGeoIpStatsFunc(tp), + GetGeoipDatabase: ingest_get_geoip_database.NewGetGeoipDatabaseFunc(tp), + GetIpLocationDatabase: ingest_get_ip_location_database.NewGetIpLocationDatabaseFunc(tp), + GetPipeline: ingest_get_pipeline.NewGetPipelineFunc(tp), + ProcessorGrok: ingest_processor_grok.NewProcessorGrokFunc(tp), + PutGeoipDatabase: ingest_put_geoip_database.NewPutGeoipDatabaseFunc(tp), + PutIpLocationDatabase: 
ingest_put_ip_location_database.NewPutIpLocationDatabaseFunc(tp), + PutPipeline: ingest_put_pipeline.NewPutPipelineFunc(tp), + Simulate: ingest_simulate.NewSimulateFunc(tp), }, // License @@ -3788,6 +10647,7 @@ func New(tp elastictransport.Interface) *API { ListRulesets: query_rules_list_rulesets.NewListRulesetsFunc(tp), PutRule: query_rules_put_rule.NewPutRuleFunc(tp), PutRuleset: query_rules_put_ruleset.NewPutRulesetFunc(tp), + Test: query_rules_test.NewTestFunc(tp), }, // Rollup @@ -3804,14 +10664,16 @@ func New(tp elastictransport.Interface) *API { // SearchApplication SearchApplication: SearchApplication{ - Delete: search_application_delete.NewDeleteFunc(tp), - DeleteBehavioralAnalytics: search_application_delete_behavioral_analytics.NewDeleteBehavioralAnalyticsFunc(tp), - Get: search_application_get.NewGetFunc(tp), - GetBehavioralAnalytics: search_application_get_behavioral_analytics.NewGetBehavioralAnalyticsFunc(tp), - List: search_application_list.NewListFunc(tp), - Put: search_application_put.NewPutFunc(tp), - PutBehavioralAnalytics: search_application_put_behavioral_analytics.NewPutBehavioralAnalyticsFunc(tp), - Search: search_application_search.NewSearchFunc(tp), + Delete: search_application_delete.NewDeleteFunc(tp), + DeleteBehavioralAnalytics: search_application_delete_behavioral_analytics.NewDeleteBehavioralAnalyticsFunc(tp), + Get: search_application_get.NewGetFunc(tp), + GetBehavioralAnalytics: search_application_get_behavioral_analytics.NewGetBehavioralAnalyticsFunc(tp), + List: search_application_list.NewListFunc(tp), + PostBehavioralAnalyticsEvent: search_application_post_behavioral_analytics_event.NewPostBehavioralAnalyticsEventFunc(tp), + Put: search_application_put.NewPutFunc(tp), + PutBehavioralAnalytics: search_application_put_behavioral_analytics.NewPutBehavioralAnalyticsFunc(tp), + RenderQuery: search_application_render_query.NewRenderQueryFunc(tp), + Search: search_application_search.NewSearchFunc(tp), }, // SearchableSnapshots @@ -3838,6 
+10700,7 @@ func New(tp elastictransport.Interface) *API { CreateApiKey: security_create_api_key.NewCreateApiKeyFunc(tp), CreateCrossClusterApiKey: security_create_cross_cluster_api_key.NewCreateCrossClusterApiKeyFunc(tp), CreateServiceToken: security_create_service_token.NewCreateServiceTokenFunc(tp), + DelegatePki: security_delegate_pki.NewDelegatePkiFunc(tp), DeletePrivileges: security_delete_privileges.NewDeletePrivilegesFunc(tp), DeleteRole: security_delete_role.NewDeleteRoleFunc(tp), DeleteRoleMapping: security_delete_role_mapping.NewDeleteRoleMappingFunc(tp), @@ -3884,6 +10747,7 @@ func New(tp elastictransport.Interface) *API { SamlServiceProviderMetadata: security_saml_service_provider_metadata.NewSamlServiceProviderMetadataFunc(tp), SuggestUserProfiles: security_suggest_user_profiles.NewSuggestUserProfilesFunc(tp), UpdateApiKey: security_update_api_key.NewUpdateApiKeyFunc(tp), + UpdateCrossClusterApiKey: security_update_cross_cluster_api_key.NewUpdateCrossClusterApiKeyFunc(tp), UpdateSettings: security_update_settings.NewUpdateSettingsFunc(tp), UpdateUserProfileData: security_update_user_profile_data.NewUpdateUserProfileDataFunc(tp), }, @@ -3895,6 +10759,11 @@ func New(tp elastictransport.Interface) *API { PutNode: shutdown_put_node.NewPutNodeFunc(tp), }, + // Simulate + Simulate: Simulate{ + Ingest: simulate_ingest.NewIngestFunc(tp), + }, + // Slm Slm: Slm{ DeleteLifecycle: slm_delete_lifecycle.NewDeleteLifecycleFunc(tp), @@ -3910,17 +10779,19 @@ func New(tp elastictransport.Interface) *API { // Snapshot Snapshot: Snapshot{ - CleanupRepository: snapshot_cleanup_repository.NewCleanupRepositoryFunc(tp), - Clone: snapshot_clone.NewCloneFunc(tp), - Create: snapshot_create.NewCreateFunc(tp), - CreateRepository: snapshot_create_repository.NewCreateRepositoryFunc(tp), - Delete: snapshot_delete.NewDeleteFunc(tp), - DeleteRepository: snapshot_delete_repository.NewDeleteRepositoryFunc(tp), - Get: snapshot_get.NewGetFunc(tp), - GetRepository: 
snapshot_get_repository.NewGetRepositoryFunc(tp), - Restore: snapshot_restore.NewRestoreFunc(tp), - Status: snapshot_status.NewStatusFunc(tp), - VerifyRepository: snapshot_verify_repository.NewVerifyRepositoryFunc(tp), + CleanupRepository: snapshot_cleanup_repository.NewCleanupRepositoryFunc(tp), + Clone: snapshot_clone.NewCloneFunc(tp), + Create: snapshot_create.NewCreateFunc(tp), + CreateRepository: snapshot_create_repository.NewCreateRepositoryFunc(tp), + Delete: snapshot_delete.NewDeleteFunc(tp), + DeleteRepository: snapshot_delete_repository.NewDeleteRepositoryFunc(tp), + Get: snapshot_get.NewGetFunc(tp), + GetRepository: snapshot_get_repository.NewGetRepositoryFunc(tp), + RepositoryAnalyze: snapshot_repository_analyze.NewRepositoryAnalyzeFunc(tp), + RepositoryVerifyIntegrity: snapshot_repository_verify_integrity.NewRepositoryVerifyIntegrityFunc(tp), + Restore: snapshot_restore.NewRestoreFunc(tp), + Status: snapshot_status.NewStatusFunc(tp), + VerifyRepository: snapshot_verify_repository.NewVerifyRepositoryFunc(tp), }, // Sql diff --git a/typedapi/asyncsearch/delete/delete.go b/typedapi/asyncsearch/delete/delete.go index 7cca10eaaf..d304c8135f 100644 --- a/typedapi/asyncsearch/delete/delete.go +++ b/typedapi/asyncsearch/delete/delete.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes an async search by identifier. -// If the search is still running, the search request will be cancelled. +// Delete an async search. +// +// If the asynchronous search is still running, it is cancelled. // Otherwise, the saved search results are deleted. 
// If the Elasticsearch security features are enabled, the deletion of a // specific async search is restricted to: the authenticated user that submitted @@ -82,8 +83,9 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } } -// Deletes an async search by identifier. -// If the search is still running, the search request will be cancelled. +// Delete an async search. +// +// If the asynchronous search is still running, it is cancelled. // Otherwise, the saved search results are deleted. // If the Elasticsearch security features are enabled, the deletion of a // specific async search is restricted to: the authenticated user that submitted diff --git a/typedapi/asyncsearch/delete/response.go b/typedapi/asyncsearch/delete/response.go index 0ff4ca302e..e714f4f8a8 100644 --- a/typedapi/asyncsearch/delete/response.go +++ b/typedapi/asyncsearch/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/async_search/delete/AsyncSearchDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/async_search/delete/AsyncSearchDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/asyncsearch/get/get.go b/typedapi/asyncsearch/get/get.go index 1b1c7f5afd..a3f4727f7b 100644 --- a/typedapi/asyncsearch/get/get.go +++ b/typedapi/asyncsearch/get/get.go @@ -16,10 +16,11 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves the results of a previously submitted async search request given -// its identifier. +// Get async search results. +// +// Retrieve the results of a previously submitted asynchronous search request. // If the Elasticsearch security features are enabled, access to the results of // a specific async search is restricted to the user or API key that submitted // it. @@ -80,8 +81,9 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } } -// Retrieves the results of a previously submitted async search request given -// its identifier. +// Get async search results. +// +// Retrieve the results of a previously submitted asynchronous search request. // If the Elasticsearch security features are enabled, access to the results of // a specific async search is restricted to the user or API key that submitted // it. @@ -307,7 +309,7 @@ func (r *Get) _id(id string) *Get { return r } -// KeepAlive Specifies how long the async search should be available in the cluster. +// KeepAlive The length of time that the async search should be available in the cluster. // When not specified, the `keep_alive` set with the corresponding submit async // request will be used. // Otherwise, it is possible to override the value and extend the validity of diff --git a/typedapi/asyncsearch/get/response.go b/typedapi/asyncsearch/get/response.go index a9b174e3e1..f47d0f8aa6 100644 --- a/typedapi/asyncsearch/get/response.go +++ b/typedapi/asyncsearch/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package get @@ -33,11 +33,11 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/async_search/get/AsyncSearchGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/async_search/get/AsyncSearchGetResponse.ts#L22-L24 type Response struct { - // CompletionTime Indicates when the async search completed. Only present - // when the search has completed. + // CompletionTime Indicates when the async search completed. + // It is present only when the search has completed. CompletionTime types.DateTime `json:"completion_time,omitempty"` CompletionTimeInMillis *int64 `json:"completion_time_in_millis,omitempty"` // ExpirationTime Indicates when the async search will expire. @@ -49,8 +49,10 @@ type Response struct { // While the query is running, `is_partial` is always set to `true`. IsPartial bool `json:"is_partial"` // IsRunning Indicates whether the search is still running or has completed. - // NOTE: If the search failed after some shards returned their results or the - // node that is coordinating the async search dies, results may be partial even + // + // > info + // > If the search failed after some shards returned their results or the node + // that is coordinating the async search dies, results may be partial even // though `is_running` is `false`. 
IsRunning bool `json:"is_running"` Response types.AsyncSearch `json:"response"` diff --git a/typedapi/asyncsearch/status/response.go b/typedapi/asyncsearch/status/response.go index 4064cff2d1..30705c8acf 100644 --- a/typedapi/asyncsearch/status/response.go +++ b/typedapi/asyncsearch/status/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package status @@ -33,19 +33,20 @@ import ( // Response holds the response body struct for the package status // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/async_search/status/AsyncSearchStatusResponse.ts#L39-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/async_search/status/AsyncSearchStatusResponse.ts#L39-L41 type Response struct { // Clusters_ Metadata about clusters involved in the cross-cluster search. - // Not shown for local-only searches. + // It is not shown for local-only searches. Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` // CompletionStatus If the async search completed, this field shows the status code of the // search. - // For example, 200 indicates that the async search was successfully completed. - // 503 indicates that the async search was completed with an error. + // For example, `200` indicates that the async search was successfully + // completed. + // `503` indicates that the async search was completed with an error. CompletionStatus *int `json:"completion_status,omitempty"` - // CompletionTime Indicates when the async search completed. Only present - // when the search has completed. + // CompletionTime Indicates when the async search completed. 
+ // It is present only when the search has completed. CompletionTime types.DateTime `json:"completion_time,omitempty"` CompletionTimeInMillis *int64 `json:"completion_time_in_millis,omitempty"` // ExpirationTime Indicates when the async search will expire. @@ -57,11 +58,13 @@ type Response struct { // While the query is running, `is_partial` is always set to `true`. IsPartial bool `json:"is_partial"` // IsRunning Indicates whether the search is still running or has completed. - // NOTE: If the search failed after some shards returned their results or the - // node that is coordinating the async search dies, results may be partial even + // + // > info + // > If the search failed after some shards returned their results or the node + // that is coordinating the async search dies, results may be partial even // though `is_running` is `false`. IsRunning bool `json:"is_running"` - // Shards_ Indicates how many shards have run the query so far. + // Shards_ The number of shards that have run the query so far. Shards_ types.ShardStatistics `json:"_shards"` StartTime types.DateTime `json:"start_time,omitempty"` StartTimeInMillis int64 `json:"start_time_in_millis"` diff --git a/typedapi/asyncsearch/status/status.go b/typedapi/asyncsearch/status/status.go index c275ba5542..ee0a553bbf 100644 --- a/typedapi/asyncsearch/status/status.go +++ b/typedapi/asyncsearch/status/status.go @@ -16,13 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Get async search status -// Retrieves the status of a previously submitted async search request given its +// Get the async search status. +// +// Get the status of a previously submitted async search request given its // identifier, without retrieving search results. 
-// If the Elasticsearch security features are enabled, use of this API is -// restricted to the `monitoring_user` role. +// If the Elasticsearch security features are enabled, the access to the status +// of a specific async search is restricted to: +// +// * The user or API key that submitted the original async search request. +// * Users that have the `monitor` cluster privilege or greater privileges. package status import ( @@ -80,11 +84,15 @@ func NewStatusFunc(tp elastictransport.Interface) NewStatus { } } -// Get async search status -// Retrieves the status of a previously submitted async search request given its +// Get the async search status. +// +// Get the status of a previously submitted async search request given its // identifier, without retrieving search results. -// If the Elasticsearch security features are enabled, use of this API is -// restricted to the `monitoring_user` role. +// If the Elasticsearch security features are enabled, the access to the status +// of a specific async search is restricted to: +// +// * The user or API key that submitted the original async search request. +// * Users that have the `monitor` cluster privilege or greater privileges. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html func New(tp elastictransport.Interface) *Status { @@ -307,6 +315,16 @@ func (r *Status) _id(id string) *Status { return r } +// KeepAlive The length of time that the async search needs to be available. +// Ongoing async searches and any saved search results are deleted after this +// period. +// API name: keep_alive +func (r *Status) KeepAlive(duration string) *Status { + r.values.Set("keep_alive", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace diff --git a/typedapi/asyncsearch/submit/request.go b/typedapi/asyncsearch/submit/request.go index 1d6f37c29a..94a75e81ee 100644 --- a/typedapi/asyncsearch/submit/request.go +++ b/typedapi/asyncsearch/submit/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package submit @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package submit // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/async_search/submit/AsyncSearchSubmitRequest.ts#L55-L286 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/async_search/submit/AsyncSearchSubmitRequest.ts#L55-L296 type Request struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` Collapse *types.FieldCollapse `json:"collapse,omitempty"` diff --git a/typedapi/asyncsearch/submit/response.go b/typedapi/asyncsearch/submit/response.go index 26f9158f14..c5275c7312 100644 --- a/typedapi/asyncsearch/submit/response.go +++ b/typedapi/asyncsearch/submit/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package submit @@ -33,11 +33,11 @@ import ( // Response holds the response body struct for the package submit // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/async_search/submit/AsyncSearchSubmitResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/async_search/submit/AsyncSearchSubmitResponse.ts#L22-L24 type Response struct { - // CompletionTime Indicates when the async search completed. Only present - // when the search has completed. + // CompletionTime Indicates when the async search completed. + // It is present only when the search has completed. CompletionTime types.DateTime `json:"completion_time,omitempty"` CompletionTimeInMillis *int64 `json:"completion_time_in_millis,omitempty"` // ExpirationTime Indicates when the async search will expire. @@ -49,8 +49,10 @@ type Response struct { // While the query is running, `is_partial` is always set to `true`. IsPartial bool `json:"is_partial"` // IsRunning Indicates whether the search is still running or has completed. - // NOTE: If the search failed after some shards returned their results or the - // node that is coordinating the async search dies, results may be partial even + // + // > info + // > If the search failed after some shards returned their results or the node + // that is coordinating the async search dies, results may be partial even // though `is_running` is `false`. 
IsRunning bool `json:"is_running"` Response types.AsyncSearch `json:"response"` diff --git a/typedapi/asyncsearch/submit/submit.go b/typedapi/asyncsearch/submit/submit.go index da20d9ea1e..d1e7c85097 100644 --- a/typedapi/asyncsearch/submit/submit.go +++ b/typedapi/asyncsearch/submit/submit.go @@ -16,17 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Runs a search request asynchronously. +// Run an async search. +// // When the primary sort of the results is an indexed field, shards get sorted -// based on minimum and maximum value that they hold for that field, hence -// partial results become available following the sort criteria that was -// requested. -// Warning: Async search does not support scroll nor search requests that only -// include the suggest section. -// By default, Elasticsearch doesn’t allow you to store an async search response -// larger than 10Mb and an attempt to do this results in an error. +// based on minimum and maximum value that they hold for that field. Partial +// results become available following the sort criteria that was requested. +// +// Warning: Asynchronous search does not support scroll or search requests that +// include only the suggest section. +// +// By default, Elasticsearch does not allow you to store an async search +// response larger than 10Mb and an attempt to do this results in an error. // The maximum allowed size for a stored async search response can be set by // changing the `search.max_async_search_response_size` cluster level setting. package submit @@ -93,15 +95,17 @@ func NewSubmitFunc(tp elastictransport.Interface) NewSubmit { } } -// Runs a search request asynchronously. +// Run an async search. 
+// // When the primary sort of the results is an indexed field, shards get sorted -// based on minimum and maximum value that they hold for that field, hence -// partial results become available following the sort criteria that was -// requested. -// Warning: Async search does not support scroll nor search requests that only -// include the suggest section. -// By default, Elasticsearch doesn’t allow you to store an async search response -// larger than 10Mb and an attempt to do this results in an error. +// based on minimum and maximum value that they hold for that field. Partial +// results become available following the sort criteria that was requested. +// +// Warning: Asynchronous search does not support scroll or search requests that +// include only the suggest section. +// +// By default, Elasticsearch does not allow you to store an async search +// response larger than 10Mb and an attempt to do this results in an error. // The maximum allowed size for a stored async search response can be set by // changing the `search.max_async_search_response_size` cluster level setting. // @@ -113,8 +117,6 @@ func New(tp elastictransport.Interface) *Submit { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -353,15 +355,6 @@ func (r *Submit) WaitForCompletionTimeout(duration string) *Submit { return r } -// KeepOnCompletion If `true`, results are stored for later retrieval when the search completes -// within the `wait_for_completion_timeout`. -// API name: keep_on_completion -func (r *Submit) KeepOnCompletion(keeponcompletion bool) *Submit { - r.values.Set("keep_on_completion", strconv.FormatBool(keeponcompletion)) - - return r -} - // KeepAlive Specifies how long the async search needs to be available. // Ongoing async searches and any saved search results are deleted after this // period. 
@@ -372,6 +365,15 @@ func (r *Submit) KeepAlive(duration string) *Submit { return r } +// KeepOnCompletion If `true`, results are stored for later retrieval when the search completes +// within the `wait_for_completion_timeout`. +// API name: keep_on_completion +func (r *Submit) KeepOnCompletion(keeponcompletion bool) *Submit { + r.values.Set("keep_on_completion", strconv.FormatBool(keeponcompletion)) + + return r +} + // AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete // indices. (This includes `_all` string or when no indices have been specified) // API name: allow_no_indices @@ -509,16 +511,6 @@ func (r *Submit) Preference(preference string) *Submit { return r } -// PreFilterShardSize The default value cannot be changed, which enforces the execution of a -// pre-filter roundtrip to retrieve statistics from each shard so that the ones -// that surely don’t hold any document matching the query get skipped. -// API name: pre_filter_shard_size -func (r *Submit) PreFilterShardSize(prefiltershardsize string) *Submit { - r.values.Set("pre_filter_shard_size", prefiltershardsize) - - return r -} - // RequestCache Specify if request cache should be used for this request or not, defaults to // true // API name: request_cache @@ -536,13 +528,6 @@ func (r *Submit) Routing(routing string) *Submit { return r } -// API name: scroll -func (r *Submit) Scroll(duration string) *Submit { - r.values.Set("scroll", duration) - - return r -} - // SearchType Search operation type // API name: search_type func (r *Submit) SearchType(searchtype searchtype.SearchType) *Submit { @@ -592,6 +577,8 @@ func (r *Submit) TypedKeys(typedkeys bool) *Submit { return r } +// RestTotalHitsAsInt Indicates whether hits.total should be rendered as an integer or an object in +// the rest search response // API name: rest_total_hits_as_int func (r *Submit) RestTotalHitsAsInt(resttotalhitsasint bool) *Submit { r.values.Set("rest_total_hits_as_int", 
strconv.FormatBool(resttotalhitsasint)) @@ -669,221 +656,401 @@ func (r *Submit) Pretty(pretty bool) *Submit { // API name: aggregations func (r *Submit) Aggregations(aggregations map[string]types.Aggregations) *Submit { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} +func (r *Submit) AddAggregation(key string, value types.AggregationsVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + + r.req.Aggregations = tmp return r } // API name: collapse -func (r *Submit) Collapse(collapse *types.FieldCollapse) *Submit { +func (r *Submit) Collapse(collapse types.FieldCollapseVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Collapse = collapse + r.req.Collapse = collapse.FieldCollapseCaster() return r } -// DocvalueFields Array of wildcard (*) patterns. The request returns doc values for field +// Array of wildcard (*) patterns. The request returns doc values for field // names matching these patterns in the hits.fields property of the response. 
// API name: docvalue_fields -func (r *Submit) DocvalueFields(docvaluefields ...types.FieldAndFormat) *Submit { - r.req.DocvalueFields = docvaluefields +func (r *Submit) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docvaluefields { + + r.req.DocvalueFields = append(r.req.DocvalueFields, *v.FieldAndFormatCaster()) + } return r } -// Explain If true, returns detailed information about score computation as part of a +// If true, returns detailed information about score computation as part of a // hit. // API name: explain func (r *Submit) Explain(explain bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Explain = &explain return r } -// Ext Configuration of search extensions defined by Elasticsearch plugins. +// Configuration of search extensions defined by Elasticsearch plugins. // API name: ext func (r *Submit) Ext(ext map[string]json.RawMessage) *Submit { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Ext = ext + return r +} + +func (r *Submit) AddExt(key string, value json.RawMessage) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Ext == nil { + r.req.Ext = make(map[string]json.RawMessage) + } else { + tmp = r.req.Ext + } + tmp[key] = value + + r.req.Ext = tmp return r } -// Fields Array of wildcard (*) patterns. The request returns values for field names +// Array of wildcard (*) patterns. The request returns values for field names // matching these patterns in the hits.fields property of the response. 
// API name: fields -func (r *Submit) Fields(fields ...types.FieldAndFormat) *Submit { - r.req.Fields = fields +func (r *Submit) Fields(fields ...types.FieldAndFormatVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range fields { + r.req.Fields = append(r.req.Fields, *v.FieldAndFormatCaster()) + + } return r } -// From Starting document offset. By default, you cannot page through more than +// Starting document offset. By default, you cannot page through more than // 10,000 // hits using the from and size parameters. To page through more hits, use the // search_after parameter. // API name: from func (r *Submit) From(from int) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } // API name: highlight -func (r *Submit) Highlight(highlight *types.Highlight) *Submit { +func (r *Submit) Highlight(highlight types.HighlightVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Highlight = highlight + r.req.Highlight = highlight.HighlightCaster() return r } -// IndicesBoost Boosts the _score of documents from specified indices. +// Boosts the _score of documents from specified indices. // API name: indices_boost -func (r *Submit) IndicesBoost(indicesboosts ...map[string]types.Float64) *Submit { - r.req.IndicesBoost = indicesboosts +func (r *Submit) IndicesBoost(indicesboost []map[string]types.Float64) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndicesBoost = indicesboost return r } -// Knn Defines the approximate kNN search to run. +// Defines the approximate kNN search to run. 
// API name: knn -func (r *Submit) Knn(knns ...types.KnnSearch) *Submit { - r.req.Knn = knns +func (r *Submit) Knn(knns ...types.KnnSearchVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Knn = make([]types.KnnSearch, len(knns)) + for i, v := range knns { + r.req.Knn[i] = *v.KnnSearchCaster() + } return r } -// MinScore Minimum _score for matching documents. Documents with a lower _score are +// Minimum _score for matching documents. Documents with a lower _score are // not included in the search results. // API name: min_score func (r *Submit) MinScore(minscore types.Float64) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MinScore = &minscore return r } -// Pit Limits the search to a point in time (PIT). If you provide a PIT, you +// Limits the search to a point in time (PIT). If you provide a PIT, you // cannot specify an in the request path. // API name: pit -func (r *Submit) Pit(pit *types.PointInTimeReference) *Submit { +func (r *Submit) Pit(pit types.PointInTimeReferenceVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pit = pit + r.req.Pit = pit.PointInTimeReferenceCaster() return r } // API name: post_filter -func (r *Submit) PostFilter(postfilter *types.Query) *Submit { +func (r *Submit) PostFilter(postfilter types.QueryVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PostFilter = postfilter + r.req.PostFilter = postfilter.QueryCaster() return r } // API name: profile func (r *Submit) Profile(profile bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Profile = &profile return r } -// Query Defines the search definition using the Query DSL. 
+// Defines the search definition using the Query DSL. // API name: query -func (r *Submit) Query(query *types.Query) *Submit { +func (r *Submit) Query(query types.QueryVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } // API name: rescore -func (r *Submit) Rescore(rescores ...types.Rescore) *Submit { - r.req.Rescore = rescores +func (r *Submit) Rescore(rescores ...types.RescoreVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Rescore = make([]types.Rescore, len(rescores)) + for i, v := range rescores { + r.req.Rescore[i] = *v.RescoreCaster() + } return r } -// RuntimeMappings Defines one or more runtime fields in the search request. These fields take +// Defines one or more runtime fields in the search request. These fields take // precedence over mapped fields with the same name. // API name: runtime_mappings -func (r *Submit) RuntimeMappings(runtimefields types.RuntimeFields) *Submit { - r.req.RuntimeMappings = runtimefields +func (r *Submit) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// ScriptFields Retrieve a script evaluation (based on different fields) for each hit. +// Retrieve a script evaluation (based on different fields) for each hit. 
// API name: script_fields func (r *Submit) ScriptFields(scriptfields map[string]types.ScriptField) *Submit { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ScriptFields = scriptfields + return r +} + +func (r *Submit) AddScriptField(key string, value types.ScriptFieldVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ScriptField + if r.req.ScriptFields == nil { + r.req.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = r.req.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + r.req.ScriptFields = tmp return r } // API name: search_after -func (r *Submit) SearchAfter(sortresults ...types.FieldValue) *Submit { - r.req.SearchAfter = sortresults +func (r *Submit) SearchAfter(sortresults ...types.FieldValueVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// SeqNoPrimaryTerm If true, returns sequence number and primary term of the last modification +// If true, returns sequence number and primary term of the last modification // of each hit. See Optimistic concurrency control. // API name: seq_no_primary_term func (r *Submit) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.SeqNoPrimaryTerm = &seqnoprimaryterm return r } -// Size The number of hits to return. By default, you cannot page through more +// The number of hits to return. By default, you cannot page through more // than 10,000 hits using the from and size parameters. To page through more // hits, use the search_after parameter. 
// API name: size func (r *Submit) Size(size int) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } // API name: slice -func (r *Submit) Slice(slice *types.SlicedScroll) *Submit { +func (r *Submit) Slice(slice types.SlicedScrollVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Slice = slice + r.req.Slice = slice.SlicedScrollCaster() return r } // API name: sort -func (r *Submit) Sort(sorts ...types.SortCombinations) *Submit { - r.req.Sort = sorts +func (r *Submit) Sort(sorts ...types.SortCombinationsVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } -// Source_ Indicates which source fields are returned for matching documents. These +// Indicates which source fields are returned for matching documents. These // fields are returned in the hits._source property of the search response. // API name: _source -func (r *Submit) Source_(sourceconfig types.SourceConfig) *Submit { - r.req.Source_ = sourceconfig +func (r *Submit) Source_(sourceconfig types.SourceConfigVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source_ = *sourceconfig.SourceConfigCaster() return r } -// Stats Stats groups to associate with the search. Each group maintains a statistics +// Stats groups to associate with the search. Each group maintains a statistics // aggregation for its associated searches. You can retrieve these stats using // the indices stats API. 
// API name: stats func (r *Submit) Stats(stats ...string) *Submit { - r.req.Stats = stats + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range stats { + + r.req.Stats = append(r.req.Stats, v) + } return r } -// StoredFields List of stored fields to return as part of a hit. If no fields are specified, +// List of stored fields to return as part of a hit. If no fields are specified, // no stored fields are included in the response. If this field is specified, // the _source // parameter defaults to false. You can pass _source: true to return both source @@ -891,20 +1058,29 @@ func (r *Submit) Stats(stats ...string) *Submit { // and stored fields in the search response. // API name: stored_fields func (r *Submit) StoredFields(fields ...string) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.StoredFields = fields return r } // API name: suggest -func (r *Submit) Suggest(suggest *types.Suggester) *Submit { +func (r *Submit) Suggest(suggest types.SuggesterVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Suggest = suggest + r.req.Suggest = suggest.SuggesterCaster() return r } -// TerminateAfter Maximum number of documents to collect for each shard. If a query reaches +// Maximum number of documents to collect for each shard. If a query reaches // this // limit, Elasticsearch terminates the query early. Elasticsearch collects // documents @@ -912,48 +1088,71 @@ func (r *Submit) Suggest(suggest *types.Suggester) *Submit { // early. 
// API name: terminate_after func (r *Submit) TerminateAfter(terminateafter int64) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TerminateAfter = &terminateafter return r } -// Timeout Specifies the period of time to wait for a response from each shard. If no +// Specifies the period of time to wait for a response from each shard. If no // response // is received before the timeout expires, the request fails and returns an // error. // Defaults to no timeout. // API name: timeout func (r *Submit) Timeout(timeout string) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Timeout = &timeout return r } -// TrackScores If true, calculate and return document scores, even if the scores are not +// If true, calculate and return document scores, even if the scores are not // used for sorting. // API name: track_scores func (r *Submit) TrackScores(trackscores bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TrackScores = &trackscores return r } -// TrackTotalHits Number of hits matching the query to count accurately. If true, the exact +// Number of hits matching the query to count accurately. If true, the exact // number of hits is returned at the cost of some performance. If false, the // response does not include the total number of hits matching the query. // Defaults to 10,000 hits. // API name: track_total_hits -func (r *Submit) TrackTotalHits(trackhits types.TrackHits) *Submit { - r.req.TrackTotalHits = trackhits +func (r *Submit) TrackTotalHits(trackhits types.TrackHitsVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackTotalHits = *trackhits.TrackHitsCaster() return r } -// Version If true, returns document version as part of a hit. 
+// If true, returns document version as part of a hit. // API name: version func (r *Submit) Version(version bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &version return r diff --git a/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go b/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go index 89b910cc3e..9ac26b5557 100644 --- a/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go +++ b/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. -// Direct use is not supported. +// Delete an autoscaling policy. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. package deleteautoscalingpolicy import ( @@ -77,8 +80,11 @@ func NewDeleteAutoscalingPolicyFunc(tp elastictransport.Interface) NewDeleteAuto } } -// Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. -// Direct use is not supported. +// Delete an autoscaling policy. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-delete-autoscaling-policy.html func New(tp elastictransport.Interface) *DeleteAutoscalingPolicy { @@ -301,6 +307,25 @@ func (r *DeleteAutoscalingPolicy) _name(name string) *DeleteAutoscalingPolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *DeleteAutoscalingPolicy) MasterTimeout(duration string) *DeleteAutoscalingPolicy { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *DeleteAutoscalingPolicy) Timeout(duration string) *DeleteAutoscalingPolicy { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/autoscaling/deleteautoscalingpolicy/response.go b/typedapi/autoscaling/deleteautoscalingpolicy/response.go index ac10ae3fd3..cb7c81225c 100644 --- a/typedapi/autoscaling/deleteautoscalingpolicy/response.go +++ b/typedapi/autoscaling/deleteautoscalingpolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleteautoscalingpolicy // Response holds the response body struct for the package deleteautoscalingpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/autoscaling/delete_autoscaling_policy/DeleteAutoscalingPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/autoscaling/delete_autoscaling_policy/DeleteAutoscalingPolicyResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go b/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go index 087d0606e0..550dc9c15b 100644 --- a/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go +++ b/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go @@ -16,11 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Gets the current autoscaling capacity based on the configured autoscaling -// policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not +// Get the autoscaling capacity. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. +// +// This API gets the current autoscaling capacity based on the configured +// autoscaling policy. 
+// It will return information to size the cluster appropriately to the current +// workload. +// +// The `required_capacity` is calculated as the maximum of the +// `required_capacity` result of all individual deciders that are enabled for +// the policy. +// +// The operator should verify that the `current_nodes` match the operator’s +// knowledge of the cluster to avoid making autoscaling decisions based on stale +// or incomplete information. +// +// The response contains decider-specific information you can use to diagnose +// how and why autoscaling determined a certain capacity was required. +// This information is provided for diagnosis only. +// Do not use this information to make autoscaling decisions. package getautoscalingcapacity import ( @@ -70,10 +90,30 @@ func NewGetAutoscalingCapacityFunc(tp elastictransport.Interface) NewGetAutoscal } } -// Gets the current autoscaling capacity based on the configured autoscaling -// policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not +// Get the autoscaling capacity. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. // +// This API gets the current autoscaling capacity based on the configured +// autoscaling policy. +// It will return information to size the cluster appropriately to the current +// workload. +// +// The `required_capacity` is calculated as the maximum of the +// `required_capacity` result of all individual deciders that are enabled for +// the policy. +// +// The operator should verify that the `current_nodes` match the operator’s +// knowledge of the cluster to avoid making autoscaling decisions based on stale +// or incomplete information. +// +// The response contains decider-specific information you can use to diagnose +// how and why autoscaling determined a certain capacity was required. +// This information is provided for diagnosis only. 
+// Do not use this information to make autoscaling decisions. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-capacity.html func New(tp elastictransport.Interface) *GetAutoscalingCapacity { r := &GetAutoscalingCapacity{ @@ -280,6 +320,16 @@ func (r *GetAutoscalingCapacity) Header(key, value string) *GetAutoscalingCapaci return r } +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetAutoscalingCapacity) MasterTimeout(duration string) *GetAutoscalingCapacity { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/autoscaling/getautoscalingcapacity/response.go b/typedapi/autoscaling/getautoscalingcapacity/response.go index 07c61d2528..db4f47d316 100644 --- a/typedapi/autoscaling/getautoscalingcapacity/response.go +++ b/typedapi/autoscaling/getautoscalingcapacity/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getautoscalingcapacity @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getautoscalingcapacity // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L25-L29 type Response struct { Policies map[string]types.AutoscalingDeciders `json:"policies"` } diff --git a/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go b/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go index 374c8c9c39..0a9ee00b49 100644 --- a/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go +++ b/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and -// ECK. Direct use is not supported. +// Get an autoscaling policy. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. package getautoscalingpolicy import ( @@ -77,8 +80,11 @@ func NewGetAutoscalingPolicyFunc(tp elastictransport.Interface) NewGetAutoscalin } } -// Retrieves an autoscaling policy. 
Designed for indirect use by ECE/ESS and -// ECK. Direct use is not supported. +// Get an autoscaling policy. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-capacity.html func New(tp elastictransport.Interface) *GetAutoscalingPolicy { @@ -301,6 +307,16 @@ func (r *GetAutoscalingPolicy) _name(name string) *GetAutoscalingPolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetAutoscalingPolicy) MasterTimeout(duration string) *GetAutoscalingPolicy { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/autoscaling/getautoscalingpolicy/response.go b/typedapi/autoscaling/getautoscalingpolicy/response.go index b549e63d49..d346442dd8 100644 --- a/typedapi/autoscaling/getautoscalingpolicy/response.go +++ b/typedapi/autoscaling/getautoscalingpolicy/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getautoscalingpolicy @@ -26,10 +26,10 @@ import ( // Response holds the response body struct for the package getautoscalingpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/autoscaling/get_autoscaling_policy/GetAutoscalingPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/autoscaling/get_autoscaling_policy/GetAutoscalingPolicyResponse.ts#L22-L24 type Response struct { - // Deciders Decider settings + // Deciders Decider settings. Deciders map[string]json.RawMessage `json:"deciders"` Roles []string `json:"roles"` } diff --git a/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go b/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go index 79eae2757e..cb31576c93 100644 --- a/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go +++ b/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and -// ECK. Direct use is not supported. +// Create or update an autoscaling policy. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. 
package putautoscalingpolicy import ( @@ -82,8 +85,11 @@ func NewPutAutoscalingPolicyFunc(tp elastictransport.Interface) NewPutAutoscalin } } -// Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and -// ECK. Direct use is not supported. +// Create or update an autoscaling policy. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-put-autoscaling-policy.html func New(tp elastictransport.Interface) *PutAutoscalingPolicy { @@ -93,8 +99,6 @@ func New(tp elastictransport.Interface) *PutAutoscalingPolicy { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -317,6 +321,25 @@ func (r *PutAutoscalingPolicy) _name(name string) *PutAutoscalingPolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *PutAutoscalingPolicy) MasterTimeout(duration string) *PutAutoscalingPolicy { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *PutAutoscalingPolicy) Timeout(duration string) *PutAutoscalingPolicy { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -361,18 +384,46 @@ func (r *PutAutoscalingPolicy) Pretty(pretty bool) *PutAutoscalingPolicy { return r } -// Deciders Decider settings +// Decider settings. 
// API name: deciders func (r *PutAutoscalingPolicy) Deciders(deciders map[string]json.RawMessage) *PutAutoscalingPolicy { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Deciders = deciders + return r +} + +func (r *PutAutoscalingPolicy) AddDecider(key string, value json.RawMessage) *PutAutoscalingPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Deciders == nil { + r.req.Deciders = make(map[string]json.RawMessage) + } else { + tmp = r.req.Deciders + } + + tmp[key] = value + r.req.Deciders = tmp return r } // API name: roles func (r *PutAutoscalingPolicy) Roles(roles ...string) *PutAutoscalingPolicy { - r.req.Roles = roles + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range roles { + + r.req.Roles = append(r.req.Roles, v) + } return r } diff --git a/typedapi/autoscaling/putautoscalingpolicy/request.go b/typedapi/autoscaling/putautoscalingpolicy/request.go index b0f3301948..5384d7a023 100644 --- a/typedapi/autoscaling/putautoscalingpolicy/request.go +++ b/typedapi/autoscaling/putautoscalingpolicy/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putautoscalingpolicy @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package putautoscalingpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/autoscaling/put_autoscaling_policy/PutAutoscalingPolicyRequest.ts#L24-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/autoscaling/put_autoscaling_policy/PutAutoscalingPolicyRequest.ts#L25-L57 type Request = types.AutoscalingPolicy // NewRequest returns a Request diff --git a/typedapi/autoscaling/putautoscalingpolicy/response.go b/typedapi/autoscaling/putautoscalingpolicy/response.go index 59208f8eae..bbd95720e1 100644 --- a/typedapi/autoscaling/putautoscalingpolicy/response.go +++ b/typedapi/autoscaling/putautoscalingpolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putautoscalingpolicy // Response holds the response body struct for the package putautoscalingpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/autoscaling/put_autoscaling_policy/PutAutoscalingPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/autoscaling/put_autoscaling_policy/PutAutoscalingPolicyResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/capabilities/capabilities.go b/typedapi/capabilities/capabilities.go index 016061bfa2..db5eaee801 100644 --- a/typedapi/capabilities/capabilities.go +++ b/typedapi/capabilities/capabilities.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Checks if the specified combination of method, API, parameters, and arbitrary // capabilities are supported diff --git a/typedapi/cat/aliases/aliases.go b/typedapi/cat/aliases/aliases.go index dce6a35afc..5209ed55bc 100644 --- a/typedapi/cat/aliases/aliases.go +++ b/typedapi/cat/aliases/aliases.go @@ -16,16 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get aliases. -// Retrieves the cluster’s index aliases, including filter and routing -// information. -// The API does not return data stream aliases. // -// CAT APIs are only intended for human consumption using the command line or -// the Kibana console. They are not intended for use by applications. For -// application consumption, use the aliases API. +// Get the cluster's index aliases, including filter and routing information. +// This API does not return data stream aliases. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or the Kibana console. They are not intended for use by applications. +// For application consumption, use the aliases API. 
package aliases import ( @@ -83,13 +83,13 @@ func NewAliasesFunc(tp elastictransport.Interface) NewAliases { } // Get aliases. -// Retrieves the cluster’s index aliases, including filter and routing -// information. -// The API does not return data stream aliases. // -// CAT APIs are only intended for human consumption using the command line or -// the Kibana console. They are not intended for use by applications. For -// application consumption, use the aliases API. +// Get the cluster's index aliases, including filter and routing information. +// This API does not return data stream aliases. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or the Kibana console. They are not intended for use by applications. +// For application consumption, use the aliases API. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-alias.html func New(tp elastictransport.Interface) *Aliases { @@ -320,28 +320,6 @@ func (r *Aliases) Name(name string) *Aliases { return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. -// API name: expand_wildcards -func (r *Aliases) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Aliases { - tmp := []string{} - for _, item := range expandwildcards { - tmp = append(tmp, item.String()) - } - r.values.Set("expand_wildcards", strings.Join(tmp, ",")) - - return r -} - -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Aliases) Format(format string) *Aliases { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Aliases) H(names ...string) *Aliases { @@ -350,11 +328,27 @@ func (r *Aliases) H(names ...string) *Aliases { return r } -// Help When set to `true` will output available columns. 
This option -// can't be combined with any other query string option. -// API name: help -func (r *Aliases) Help(help bool) *Aliases { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Aliases) S(names ...string) *Aliases { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// ExpandWildcards The type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// It supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *Aliases) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Aliases { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } @@ -370,7 +364,10 @@ func (r *Aliases) Local(local bool) *Aliases { return r } -// MasterTimeout Period to wait for a connection to the master node. +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicated that the request should never timeout, you can set it to `-1`. // API name: master_timeout func (r *Aliases) MasterTimeout(duration string) *Aliases { r.values.Set("master_timeout", duration) @@ -378,12 +375,20 @@ func (r *Aliases) MasterTimeout(duration string) *Aliases { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. 
-// API name: s -func (r *Aliases) S(names ...string) *Aliases { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Aliases) Format(format string) *Aliases { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Aliases) Help(help bool) *Aliases { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/aliases/response.go b/typedapi/cat/aliases/response.go index ab27589687..9d418464b1 100644 --- a/typedapi/cat/aliases/response.go +++ b/typedapi/cat/aliases/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package aliases @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package aliases // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/aliases/CatAliasesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/aliases/CatAliasesResponse.ts#L22-L24 type Response []types.AliasesRecord diff --git a/typedapi/cat/allocation/allocation.go b/typedapi/cat/allocation/allocation.go index 3051d93db4..1811c0a2e7 100644 --- a/typedapi/cat/allocation/allocation.go +++ b/typedapi/cat/allocation/allocation.go @@ -16,11 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Provides a snapshot of the number of shards allocated to each data node and -// their disk space. -// IMPORTANT: cat APIs are only intended for human consumption using the command +// Get shard allocation information. +// +// Get a snapshot of the number of shards allocated to each data node and their +// disk space. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. package allocation @@ -78,9 +81,12 @@ func NewAllocationFunc(tp elastictransport.Interface) NewAllocation { } } -// Provides a snapshot of the number of shards allocated to each data node and -// their disk space. -// IMPORTANT: cat APIs are only intended for human consumption using the command +// Get shard allocation information. +// +// Get a snapshot of the number of shards allocated to each data node and their +// disk space. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-allocation.html @@ -302,8 +308,8 @@ func (r *Allocation) Header(key, value string) *Allocation { return r } -// NodeId Comma-separated list of node identifiers or names used to limit the returned -// information. +// NodeId A comma-separated list of node identifiers or names used to limit the +// returned information. 
// API Name: nodeid func (r *Allocation) NodeId(nodeid string) *Allocation { r.paramSet |= nodeidMask @@ -320,15 +326,6 @@ func (r *Allocation) Bytes(bytes bytes.Bytes) *Allocation { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Allocation) Format(format string) *Allocation { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Allocation) H(names ...string) *Allocation { @@ -337,11 +334,12 @@ func (r *Allocation) H(names ...string) *Allocation { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Allocation) Help(help bool) *Allocation { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Allocation) S(names ...string) *Allocation { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -365,12 +363,20 @@ func (r *Allocation) MasterTimeout(duration string) *Allocation { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Allocation) S(names ...string) *Allocation { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Allocation) Format(format string) *Allocation { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. 
This option +// can't be combined with any other query string option. +// API name: help +func (r *Allocation) Help(help bool) *Allocation { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/allocation/response.go b/typedapi/cat/allocation/response.go index 81fc0a1f4f..fcc3ef0cd1 100644 --- a/typedapi/cat/allocation/response.go +++ b/typedapi/cat/allocation/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package allocation @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package allocation // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/allocation/CatAllocationResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/allocation/CatAllocationResponse.ts#L22-L24 type Response []types.AllocationRecord diff --git a/typedapi/cat/componenttemplates/component_templates.go b/typedapi/cat/componenttemplates/component_templates.go index 0d15dd753c..64b360be40 100644 --- a/typedapi/cat/componenttemplates/component_templates.go +++ b/typedapi/cat/componenttemplates/component_templates.go @@ -16,15 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get component templates. -// Returns information about component templates in a cluster. +// +// Get information about component templates in a cluster. 
// Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // -// CAT APIs are only intended for human consumption using the command line or -// Kibana console. +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. // They are not intended for use by applications. For application consumption, // use the get component template API. package componenttemplates @@ -83,12 +84,13 @@ func NewComponentTemplatesFunc(tp elastictransport.Interface) NewComponentTempla } // Get component templates. -// Returns information about component templates in a cluster. +// +// Get information about component templates in a cluster. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // -// CAT APIs are only intended for human consumption using the command line or -// Kibana console. +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. // They are not intended for use by applications. For application consumption, // use the get component template API. // @@ -311,8 +313,9 @@ func (r *ComponentTemplates) Header(key, value string) *ComponentTemplates { return r } -// Name The name of the component template. Accepts wildcard expressions. If omitted, -// all component templates are returned. +// Name The name of the component template. +// It accepts wildcard expressions. +// If it is omitted, all component templates are returned. // API Name: name func (r *ComponentTemplates) Name(name string) *ComponentTemplates { r.paramSet |= nameMask @@ -321,15 +324,6 @@ func (r *ComponentTemplates) Name(name string) *ComponentTemplates { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. 
-// API name: format -func (r *ComponentTemplates) Format(format string) *ComponentTemplates { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *ComponentTemplates) H(names ...string) *ComponentTemplates { @@ -338,11 +332,12 @@ func (r *ComponentTemplates) H(names ...string) *ComponentTemplates { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *ComponentTemplates) Help(help bool) *ComponentTemplates { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *ComponentTemplates) S(names ...string) *ComponentTemplates { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -358,7 +353,7 @@ func (r *ComponentTemplates) Local(local bool) *ComponentTemplates { return r } -// MasterTimeout Period to wait for a connection to the master node. +// MasterTimeout The period to wait for a connection to the master node. // API name: master_timeout func (r *ComponentTemplates) MasterTimeout(duration string) *ComponentTemplates { r.values.Set("master_timeout", duration) @@ -366,12 +361,20 @@ func (r *ComponentTemplates) MasterTimeout(duration string) *ComponentTemplates return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *ComponentTemplates) S(names ...string) *ComponentTemplates { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. 
+// API name: format +func (r *ComponentTemplates) Format(format string) *ComponentTemplates { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *ComponentTemplates) Help(help bool) *ComponentTemplates { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/componenttemplates/response.go b/typedapi/cat/componenttemplates/response.go index 4f3168f2f2..b58f540a87 100644 --- a/typedapi/cat/componenttemplates/response.go +++ b/typedapi/cat/componenttemplates/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package componenttemplates @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package componenttemplates // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/component_templates/CatComponentTemplatesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/component_templates/CatComponentTemplatesResponse.ts#L22-L24 type Response []types.CatComponentTemplate diff --git a/typedapi/cat/count/count.go b/typedapi/cat/count/count.go index fe64df910e..dff976e18d 100644 --- a/typedapi/cat/count/count.go +++ b/typedapi/cat/count/count.go @@ -16,16 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get a document count. -// Provides quick access to a document count for a data stream, an index, or an +// +// Get quick access to a document count for a data stream, an index, or an // entire cluster. // The document count only includes live documents, not deleted documents which // have not yet been removed by the merge process. // -// CAT APIs are only intended for human consumption using the command line or -// Kibana console. +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. // They are not intended for use by applications. For application consumption, // use the count API. package count @@ -84,13 +85,14 @@ func NewCountFunc(tp elastictransport.Interface) NewCount { } // Get a document count. -// Provides quick access to a document count for a data stream, an index, or an +// +// Get quick access to a document count for a data stream, an index, or an // entire cluster. // The document count only includes live documents, not deleted documents which // have not yet been removed by the merge process. // -// CAT APIs are only intended for human consumption using the command line or -// Kibana console. +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. // They are not intended for use by applications. For application consumption, // use the count API. // @@ -313,10 +315,11 @@ func (r *Count) Header(key, value string) *Count { return r } -// Index Comma-separated list of data streams, indices, and aliases used to limit the -// request. -// Supports wildcards (`*`). To target all data streams and indices, omit this -// parameter or use `*` or `_all`. 
+// Index A comma-separated list of data streams, indices, and aliases used to limit +// the request. +// It supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index func (r *Count) Index(index string) *Count { r.paramSet |= indexMask @@ -325,15 +328,6 @@ func (r *Count) Index(index string) *Count { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Count) Format(format string) *Count { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Count) H(names ...string) *Count { @@ -342,40 +336,30 @@ func (r *Count) H(names ...string) *Count { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Count) Help(help bool) *Count { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Count) Local(local bool) *Count { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Count) S(names ...string) *Count { + r.values.Set("s", strings.Join(names, ",")) return r } -// MasterTimeout Period to wait for a connection to the master node. 
-// API name: master_timeout -func (r *Count) MasterTimeout(duration string) *Count { - r.values.Set("master_timeout", duration) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Count) Format(format string) *Count { + r.values.Set("format", format) return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Count) S(names ...string) *Count { - r.values.Set("s", strings.Join(names, ",")) +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Count) Help(help bool) *Count { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/count/response.go b/typedapi/cat/count/response.go index 4f4540f024..4118b7b408 100644 --- a/typedapi/cat/count/response.go +++ b/typedapi/cat/count/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package count @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package count // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/count/CatCountResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/count/CatCountResponse.ts#L22-L24 type Response []types.CountRecord diff --git a/typedapi/cat/fielddata/fielddata.go b/typedapi/cat/fielddata/fielddata.go index 5f07762568..b10e199563 100644 --- a/typedapi/cat/fielddata/fielddata.go +++ b/typedapi/cat/fielddata/fielddata.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the amount of heap memory currently used by the field data cache on -// every data node in the cluster. +// Get field data cache information. +// +// Get the amount of heap memory currently used by the field data cache on every +// data node in the cluster. +// // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. // They are not intended for use by applications. For application consumption, @@ -80,8 +83,11 @@ func NewFielddataFunc(tp elastictransport.Interface) NewFielddata { } } -// Returns the amount of heap memory currently used by the field data cache on -// every data node in the cluster. +// Get field data cache information. 
+// +// Get the amount of heap memory currently used by the field data cache on every +// data node in the cluster. +// // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. // They are not intended for use by applications. For application consumption, @@ -324,15 +330,6 @@ func (r *Fielddata) Bytes(bytes bytes.Bytes) *Fielddata { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Fielddata) Format(format string) *Fielddata { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Fielddata) H(names ...string) *Fielddata { @@ -341,40 +338,30 @@ func (r *Fielddata) H(names ...string) *Fielddata { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Fielddata) Help(help bool) *Fielddata { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Fielddata) Local(local bool) *Fielddata { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Fielddata) S(names ...string) *Fielddata { + r.values.Set("s", strings.Join(names, ",")) return r } -// MasterTimeout Period to wait for a connection to the master node. 
-// API name: master_timeout -func (r *Fielddata) MasterTimeout(duration string) *Fielddata { - r.values.Set("master_timeout", duration) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Fielddata) Format(format string) *Fielddata { + r.values.Set("format", format) return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Fielddata) S(names ...string) *Fielddata { - r.values.Set("s", strings.Join(names, ",")) +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Fielddata) Help(help bool) *Fielddata { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/fielddata/response.go b/typedapi/cat/fielddata/response.go index 8e3717327b..a094f3961f 100644 --- a/typedapi/cat/fielddata/response.go +++ b/typedapi/cat/fielddata/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package fielddata @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package fielddata // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/fielddata/CatFielddataResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/fielddata/CatFielddataResponse.ts#L22-L24 type Response []types.FielddataRecord diff --git a/typedapi/cat/health/health.go b/typedapi/cat/health/health.go index 7a5a835f14..6114287ffa 100644 --- a/typedapi/cat/health/health.go +++ b/typedapi/cat/health/health.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the health status of a cluster, similar to the cluster health API. -// IMPORTANT: cat APIs are only intended for human consumption using the command +// Get the cluster health status. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command // line or Kibana console. // They are not intended for use by applications. For application consumption, // use the cluster health API. @@ -83,8 +84,9 @@ func NewHealthFunc(tp elastictransport.Interface) NewHealth { } } -// Returns the health status of a cluster, similar to the cluster health API. -// IMPORTANT: cat APIs are only intended for human consumption using the command +// Get the cluster health status. 
+// +// IMPORTANT: CAT APIs are only intended for human consumption using the command // line or Kibana console. // They are not intended for use by applications. For application consumption, // use the cluster health API. @@ -321,15 +323,6 @@ func (r *Health) Ts(ts bool) *Health { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Health) Format(format string) *Health { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Health) H(names ...string) *Health { @@ -338,40 +331,30 @@ func (r *Health) H(names ...string) *Health { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Health) Help(help bool) *Health { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Health) Local(local bool) *Health { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Health) S(names ...string) *Health { + r.values.Set("s", strings.Join(names, ",")) return r } -// MasterTimeout Period to wait for a connection to the master node. 
-// API name: master_timeout -func (r *Health) MasterTimeout(duration string) *Health { - r.values.Set("master_timeout", duration) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Health) Format(format string) *Health { + r.values.Set("format", format) return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Health) S(names ...string) *Health { - r.values.Set("s", strings.Join(names, ",")) +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Health) Help(help bool) *Health { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/health/response.go b/typedapi/cat/health/response.go index d52a652562..5c362cff28 100644 --- a/typedapi/cat/health/response.go +++ b/typedapi/cat/health/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package health @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package health // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/health/CatHealthResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/health/CatHealthResponse.ts#L22-L24 type Response []types.HealthRecord diff --git a/typedapi/cat/help/help.go b/typedapi/cat/help/help.go index 9c67403f4b..4fc14c7a7b 100644 --- a/typedapi/cat/help/help.go +++ b/typedapi/cat/help/help.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get CAT help. -// Returns help for the CAT APIs. +// +// Get help for the CAT APIs. package help import ( @@ -30,7 +31,6 @@ import ( "io" "net/http" "net/url" - "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" @@ -70,7 +70,8 @@ func NewHelpFunc(tp elastictransport.Interface) NewHelp { } // Get CAT help. -// Returns help for the CAT APIs. +// +// Get help for the CAT APIs. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat.html func New(tp elastictransport.Interface) *Help { @@ -177,7 +178,7 @@ func (r Help) Perform(providedCtx context.Context) (*http.Response, error) { } // Do runs the request through the transport, handle the response and returns a help.Response -func (r Help) Do(providedCtx context.Context) (Response, error) { +func (r Help) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -200,7 +201,7 @@ func (r Help) Do(providedCtx context.Context) (Response, error) { defer res.Body.Close() if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(&response) + err = json.NewDecoder(res.Body).Decode(response) if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) @@ -275,110 +276,3 @@ func (r *Help) Header(key, value string) *Help { return r } - -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Help) Format(format string) *Help { - r.values.Set("format", format) - - return r -} - -// H List of columns to appear in the response. Supports simple wildcards. -// API name: h -func (r *Help) H(names ...string) *Help { - r.values.Set("h", strings.Join(names, ",")) - - return r -} - -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Help) Help(help bool) *Help { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. 
In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Help) Local(local bool) *Help { - r.values.Set("local", strconv.FormatBool(local)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *Help) MasterTimeout(duration string) *Help { - r.values.Set("master_timeout", duration) - - return r -} - -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Help) S(names ...string) *Help { - r.values.Set("s", strings.Join(names, ",")) - - return r -} - -// V When set to `true` will enable verbose output. -// API name: v -func (r *Help) V(v bool) *Help { - r.values.Set("v", strconv.FormatBool(v)) - - return r -} - -// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors -// when they occur. -// API name: error_trace -func (r *Help) ErrorTrace(errortrace bool) *Help { - r.values.Set("error_trace", strconv.FormatBool(errortrace)) - - return r -} - -// FilterPath Comma-separated list of filters in dot notation which reduce the response -// returned by Elasticsearch. -// API name: filter_path -func (r *Help) FilterPath(filterpaths ...string) *Help { - tmp := []string{} - for _, item := range filterpaths { - tmp = append(tmp, fmt.Sprintf("%v", item)) - } - r.values.Set("filter_path", strings.Join(tmp, ",")) - - return r -} - -// Human When set to `true` will return statistics in a format suitable for humans. -// For example `"exists_time": "1h"` for humans and -// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human -// readable values will be omitted. This makes sense for responses being -// consumed -// only by machines. 
-// API name: human -func (r *Help) Human(human bool) *Help { - r.values.Set("human", strconv.FormatBool(human)) - - return r -} - -// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use -// this option for debugging only. -// API name: pretty -func (r *Help) Pretty(pretty bool) *Help { - r.values.Set("pretty", strconv.FormatBool(pretty)) - - return r -} diff --git a/typedapi/cat/help/response.go b/typedapi/cat/help/response.go index 13992e6614..4a7882b3ff 100644 --- a/typedapi/cat/help/response.go +++ b/typedapi/cat/help/response.go @@ -16,22 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package help -import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types" -) - // Response holds the response body struct for the package help // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/help/CatHelpResponse.ts#L22-L24 - -type Response []types.HelpRecord +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/help/CatHelpResponse.ts#L20-L25 +type Response struct { +} // NewResponse returns a Response -func NewResponse() Response { - r := Response{} +func NewResponse() *Response { + r := &Response{} return r } diff --git a/typedapi/cat/indices/indices.go b/typedapi/cat/indices/indices.go index 87b7216759..a0dc03d01d 100644 --- a/typedapi/cat/indices/indices.go +++ b/typedapi/cat/indices/indices.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get index information. -// Returns high-level information about indices in a cluster, including backing +// +// Get high-level information about indices in a cluster, including backing // indices for data streams. // // Use this request to get the following information for each index in a @@ -100,7 +101,8 @@ func NewIndicesFunc(tp elastictransport.Interface) NewIndices { } // Get index information. -// Returns high-level information about indices in a cluster, including backing +// +// Get high-level information about indices in a cluster, including backing // indices for data streams. // // Use this request to get the following information for each index in a @@ -407,11 +409,10 @@ func (r *Indices) Time(time timeunit.TimeUnit) *Indices { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Indices) Format(format string) *Indices { - r.values.Set("format", format) +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Indices) MasterTimeout(duration string) *Indices { + r.values.Set("master_timeout", duration) return r } @@ -424,40 +425,30 @@ func (r *Indices) H(names ...string) *Indices { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Indices) Help(help bool) *Indices { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. 
In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Indices) Local(local bool) *Indices { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Indices) S(names ...string) *Indices { + r.values.Set("s", strings.Join(names, ",")) return r } -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *Indices) MasterTimeout(duration string) *Indices { - r.values.Set("master_timeout", duration) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Indices) Format(format string) *Indices { + r.values.Set("format", format) return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Indices) S(names ...string) *Indices { - r.values.Set("s", strings.Join(names, ",")) +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Indices) Help(help bool) *Indices { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/indices/response.go b/typedapi/cat/indices/response.go index 74f27a0667..e1af72a3d9 100644 --- a/typedapi/cat/indices/response.go +++ b/typedapi/cat/indices/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package indices @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package indices // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/indices/CatIndicesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/indices/CatIndicesResponse.ts#L22-L24 type Response []types.IndicesRecord diff --git a/typedapi/cat/master/master.go b/typedapi/cat/master/master.go index 146ec1cdbb..3dd74f698e 100644 --- a/typedapi/cat/master/master.go +++ b/typedapi/cat/master/master.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about the master node, including the ID, bound IP -// address, and name. +// Get master node information. +// +// Get information about the master node, including the ID, bound IP address, +// and name. +// // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. @@ -72,8 +75,11 @@ func NewMasterFunc(tp elastictransport.Interface) NewMaster { } } -// Returns information about the master node, including the ID, bound IP -// address, and name. +// Get master node information. +// +// Get information about the master node, including the ID, bound IP address, +// and name. 
+// // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. @@ -284,15 +290,6 @@ func (r *Master) Header(key, value string) *Master { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Master) Format(format string) *Master { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Master) H(names ...string) *Master { @@ -301,11 +298,12 @@ func (r *Master) H(names ...string) *Master { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Master) Help(help bool) *Master { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Master) S(names ...string) *Master { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -329,12 +327,20 @@ func (r *Master) MasterTimeout(duration string) *Master { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Master) S(names ...string) *Master { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. 
+// API name: format +func (r *Master) Format(format string) *Master { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Master) Help(help bool) *Master { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/master/response.go b/typedapi/cat/master/response.go index 001c32a61a..5de5e74198 100644 --- a/typedapi/cat/master/response.go +++ b/typedapi/cat/master/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package master @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package master // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/master/CatMasterResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/master/CatMasterResponse.ts#L22-L24 type Response []types.MasterRecord diff --git a/typedapi/cat/mldatafeeds/ml_datafeeds.go b/typedapi/cat/mldatafeeds/ml_datafeeds.go index c7ac282615..5d0db822b1 100644 --- a/typedapi/cat/mldatafeeds/ml_datafeeds.go +++ b/typedapi/cat/mldatafeeds/ml_datafeeds.go @@ -16,16 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get datafeeds. -// Returns configuration and usage information about datafeeds. 
+// +// Get configuration and usage information about datafeeds. // This API returns a maximum of 10,000 datafeeds. // If the Elasticsearch security features are enabled, you must have // `monitor_ml`, `monitor`, `manage_ml`, or `manage` // cluster privileges to use this API. // -// CAT APIs are only intended for human consumption using the Kibana +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get datafeed statistics API. package mldatafeeds @@ -86,13 +87,14 @@ func NewMlDatafeedsFunc(tp elastictransport.Interface) NewMlDatafeeds { } // Get datafeeds. -// Returns configuration and usage information about datafeeds. +// +// Get configuration and usage information about datafeeds. // This API returns a maximum of 10,000 datafeeds. // If the Elasticsearch security features are enabled, you must have // `monitor_ml`, `monitor`, `manage_ml`, or `manage` // cluster privileges to use this API. // -// CAT APIs are only intended for human consumption using the Kibana +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get datafeed statistics API. // @@ -397,25 +399,6 @@ func (r *MlDatafeeds) Help(help bool) *MlDatafeeds { return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *MlDatafeeds) Local(local bool) *MlDatafeeds { - r.values.Set("local", strconv.FormatBool(local)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. 
-// API name: master_timeout -func (r *MlDatafeeds) MasterTimeout(duration string) *MlDatafeeds { - r.values.Set("master_timeout", duration) - - return r -} - // V When set to `true` will enable verbose output. // API name: v func (r *MlDatafeeds) V(v bool) *MlDatafeeds { diff --git a/typedapi/cat/mldatafeeds/response.go b/typedapi/cat/mldatafeeds/response.go index 174f9556e9..b2529e3360 100644 --- a/typedapi/cat/mldatafeeds/response.go +++ b/typedapi/cat/mldatafeeds/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package mldatafeeds @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mldatafeeds // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/ml_datafeeds/CatDatafeedsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/ml_datafeeds/CatDatafeedsResponse.ts#L22-L24 type Response []types.DatafeedsRecord diff --git a/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go b/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go index 6133297693..9c858b81f8 100644 --- a/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go +++ b/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go @@ -16,12 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get data frame analytics jobs. 
-// Returns configuration and usage information about data frame analytics jobs. // -// CAT APIs are only intended for human consumption using the Kibana +// Get configuration and usage information about data frame analytics jobs. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get data frame analytics jobs statistics // API. @@ -42,6 +43,7 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catdfacolumn" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) const ( @@ -83,9 +85,10 @@ func NewMlDataFrameAnalyticsFunc(tp elastictransport.Interface) NewMlDataFrameAn } // Get data frame analytics jobs. -// Returns configuration and usage information about data frame analytics jobs. // -// CAT APIs are only intended for human consumption using the Kibana +// Get configuration and usage information about data frame analytics jobs. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get data frame analytics jobs statistics // API. @@ -370,8 +373,8 @@ func (r *MlDataFrameAnalytics) S(catdfacolumns ...catdfacolumn.CatDfaColumn) *Ml // Time Unit used to display time values. 
// API name: time -func (r *MlDataFrameAnalytics) Time(duration string) *MlDataFrameAnalytics { - r.values.Set("time", duration) +func (r *MlDataFrameAnalytics) Time(time timeunit.TimeUnit) *MlDataFrameAnalytics { + r.values.Set("time", time.String()) return r } @@ -394,25 +397,6 @@ func (r *MlDataFrameAnalytics) Help(help bool) *MlDataFrameAnalytics { return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *MlDataFrameAnalytics) Local(local bool) *MlDataFrameAnalytics { - r.values.Set("local", strconv.FormatBool(local)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *MlDataFrameAnalytics) MasterTimeout(duration string) *MlDataFrameAnalytics { - r.values.Set("master_timeout", duration) - - return r -} - // V When set to `true` will enable verbose output. // API name: v func (r *MlDataFrameAnalytics) V(v bool) *MlDataFrameAnalytics { diff --git a/typedapi/cat/mldataframeanalytics/response.go b/typedapi/cat/mldataframeanalytics/response.go index 444011571c..917bcef650 100644 --- a/typedapi/cat/mldataframeanalytics/response.go +++ b/typedapi/cat/mldataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package mldataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mldataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/ml_data_frame_analytics/CatDataFrameAnalyticsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/ml_data_frame_analytics/CatDataFrameAnalyticsResponse.ts#L22-L24 type Response []types.DataFrameAnalyticsRecord diff --git a/typedapi/cat/mljobs/ml_jobs.go b/typedapi/cat/mljobs/ml_jobs.go index 25cb427592..934af24caa 100644 --- a/typedapi/cat/mljobs/ml_jobs.go +++ b/typedapi/cat/mljobs/ml_jobs.go @@ -16,16 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get anomaly detection jobs. -// Returns configuration and usage information for anomaly detection jobs. +// +// Get configuration and usage information for anomaly detection jobs. // This API returns a maximum of 10,000 jobs. // If the Elasticsearch security features are enabled, you must have // `monitor_ml`, // `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. // -// CAT APIs are only intended for human consumption using the Kibana +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get anomaly detection job statistics API. 
package mljobs @@ -87,13 +88,14 @@ func NewMlJobsFunc(tp elastictransport.Interface) NewMlJobs { } // Get anomaly detection jobs. -// Returns configuration and usage information for anomaly detection jobs. +// +// Get configuration and usage information for anomaly detection jobs. // This API returns a maximum of 10,000 jobs. // If the Elasticsearch security features are enabled, you must have // `monitor_ml`, // `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. // -// CAT APIs are only intended for human consumption using the Kibana +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get anomaly detection job statistics API. // @@ -406,25 +408,6 @@ func (r *MlJobs) Help(help bool) *MlJobs { return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *MlJobs) Local(local bool) *MlJobs { - r.values.Set("local", strconv.FormatBool(local)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *MlJobs) MasterTimeout(duration string) *MlJobs { - r.values.Set("master_timeout", duration) - - return r -} - // V When set to `true` will enable verbose output. // API name: v func (r *MlJobs) V(v bool) *MlJobs { diff --git a/typedapi/cat/mljobs/response.go b/typedapi/cat/mljobs/response.go index e44204086a..b769f83521 100644 --- a/typedapi/cat/mljobs/response.go +++ b/typedapi/cat/mljobs/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package mljobs @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mljobs // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/ml_jobs/CatJobsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/ml_jobs/CatJobsResponse.ts#L22-L24 type Response []types.JobsRecord diff --git a/typedapi/cat/mltrainedmodels/ml_trained_models.go b/typedapi/cat/mltrainedmodels/ml_trained_models.go index c68671c9b4..a48d0fddfd 100644 --- a/typedapi/cat/mltrainedmodels/ml_trained_models.go +++ b/typedapi/cat/mltrainedmodels/ml_trained_models.go @@ -16,12 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get trained models. -// Returns configuration and usage information about inference trained models. // -// CAT APIs are only intended for human consumption using the Kibana +// Get configuration and usage information about inference trained models. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get trained models statistics API. 
package mltrainedmodels @@ -41,6 +42,7 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattrainedmodelscolumn" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) const ( @@ -82,9 +84,10 @@ func NewMlTrainedModelsFunc(tp elastictransport.Interface) NewMlTrainedModels { } // Get trained models. -// Returns configuration and usage information about inference trained models. // -// CAT APIs are only intended for human consumption using the Kibana +// Get configuration and usage information about inference trained models. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For // application consumption, use the get trained models statistics API. // @@ -383,6 +386,14 @@ func (r *MlTrainedModels) Size(size int) *MlTrainedModels { return r } +// Time Unit used to display time values. +// API name: time +func (r *MlTrainedModels) Time(time timeunit.TimeUnit) *MlTrainedModels { + r.values.Set("time", time.String()) + + return r +} + // Format Specifies the format to return the columnar data in, can be set to // `text`, `json`, `cbor`, `yaml`, or `smile`. // API name: format @@ -401,25 +412,6 @@ func (r *MlTrainedModels) Help(help bool) *MlTrainedModels { return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *MlTrainedModels) Local(local bool) *MlTrainedModels { - r.values.Set("local", strconv.FormatBool(local)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. 
-// API name: master_timeout -func (r *MlTrainedModels) MasterTimeout(duration string) *MlTrainedModels { - r.values.Set("master_timeout", duration) - - return r -} - // V When set to `true` will enable verbose output. // API name: v func (r *MlTrainedModels) V(v bool) *MlTrainedModels { diff --git a/typedapi/cat/mltrainedmodels/response.go b/typedapi/cat/mltrainedmodels/response.go index f7036a7cd0..f0fe53db6d 100644 --- a/typedapi/cat/mltrainedmodels/response.go +++ b/typedapi/cat/mltrainedmodels/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package mltrainedmodels @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mltrainedmodels // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/ml_trained_models/CatTrainedModelsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/ml_trained_models/CatTrainedModelsResponse.ts#L22-L24 type Response []types.TrainedModelsRecord diff --git a/typedapi/cat/nodeattrs/nodeattrs.go b/typedapi/cat/nodeattrs/nodeattrs.go index da85e6bfb2..d28db47c5d 100644 --- a/typedapi/cat/nodeattrs/nodeattrs.go +++ b/typedapi/cat/nodeattrs/nodeattrs.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about custom node attributes. +// Get node attribute information. 
+// +// Get information about custom node attributes. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. @@ -71,7 +73,9 @@ func NewNodeattrsFunc(tp elastictransport.Interface) NewNodeattrs { } } -// Returns information about custom node attributes. +// Get node attribute information. +// +// Get information about custom node attributes. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. @@ -282,15 +286,6 @@ func (r *Nodeattrs) Header(key, value string) *Nodeattrs { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Nodeattrs) Format(format string) *Nodeattrs { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Nodeattrs) H(names ...string) *Nodeattrs { @@ -299,11 +294,12 @@ func (r *Nodeattrs) H(names ...string) *Nodeattrs { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Nodeattrs) Help(help bool) *Nodeattrs { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. 
+// API name: s +func (r *Nodeattrs) S(names ...string) *Nodeattrs { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -327,12 +323,20 @@ func (r *Nodeattrs) MasterTimeout(duration string) *Nodeattrs { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Nodeattrs) S(names ...string) *Nodeattrs { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Nodeattrs) Format(format string) *Nodeattrs { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Nodeattrs) Help(help bool) *Nodeattrs { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/nodeattrs/response.go b/typedapi/cat/nodeattrs/response.go index 82e7ec45ea..8bb057fac3 100644 --- a/typedapi/cat/nodeattrs/response.go +++ b/typedapi/cat/nodeattrs/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package nodeattrs @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package nodeattrs // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/nodeattrs/CatNodeAttributesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/nodeattrs/CatNodeAttributesResponse.ts#L22-L24 type Response []types.NodeAttributesRecord diff --git a/typedapi/cat/nodes/nodes.go b/typedapi/cat/nodes/nodes.go index 646ffcfacf..d84575b346 100644 --- a/typedapi/cat/nodes/nodes.go +++ b/typedapi/cat/nodes/nodes.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about the nodes in a cluster. +// Get node information. +// +// Get information about the nodes in a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. @@ -38,6 +40,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. 
@@ -72,7 +75,9 @@ func NewNodesFunc(tp elastictransport.Interface) NewNodes { } } -// Returns information about the nodes in a cluster. +// Get node information. +// +// Get information about the nodes in a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. @@ -308,15 +313,6 @@ func (r *Nodes) IncludeUnloadedSegments(includeunloadedsegments bool) *Nodes { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Nodes) Format(format string) *Nodes { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Nodes) H(names ...string) *Nodes { @@ -325,22 +321,12 @@ func (r *Nodes) H(names ...string) *Nodes { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Nodes) Help(help bool) *Nodes { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Nodes) Local(local bool) *Nodes { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. 
+// API name: s +func (r *Nodes) S(names ...string) *Nodes { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -353,12 +339,28 @@ func (r *Nodes) MasterTimeout(duration string) *Nodes { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Nodes) S(names ...string) *Nodes { - r.values.Set("s", strings.Join(names, ",")) +// Time Unit used to display time values. +// API name: time +func (r *Nodes) Time(time timeunit.TimeUnit) *Nodes { + r.values.Set("time", time.String()) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Nodes) Format(format string) *Nodes { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Nodes) Help(help bool) *Nodes { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/nodes/response.go b/typedapi/cat/nodes/response.go index 6ab0270af1..f53b0af8ef 100644 --- a/typedapi/cat/nodes/response.go +++ b/typedapi/cat/nodes/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package nodes @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package nodes // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/nodes/CatNodesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/nodes/CatNodesResponse.ts#L22-L24 type Response []types.NodesRecord diff --git a/typedapi/cat/pendingtasks/pending_tasks.go b/typedapi/cat/pendingtasks/pending_tasks.go index 59e3ec70d8..58001b3ea9 100644 --- a/typedapi/cat/pendingtasks/pending_tasks.go +++ b/typedapi/cat/pendingtasks/pending_tasks.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns cluster-level changes that have not yet been executed. +// Get pending task information. +// +// Get information about cluster-level changes that have not yet taken effect. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the pending cluster tasks API. @@ -37,6 +39,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. 
@@ -71,7 +74,9 @@ func NewPendingTasksFunc(tp elastictransport.Interface) NewPendingTasks { } } -// Returns cluster-level changes that have not yet been executed. +// Get pending task information. +// +// Get information about cluster-level changes that have not yet taken effect. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the pending cluster tasks API. @@ -282,15 +287,6 @@ func (r *PendingTasks) Header(key, value string) *PendingTasks { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *PendingTasks) Format(format string) *PendingTasks { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *PendingTasks) H(names ...string) *PendingTasks { @@ -299,11 +295,12 @@ func (r *PendingTasks) H(names ...string) *PendingTasks { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *PendingTasks) Help(help bool) *PendingTasks { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *PendingTasks) S(names ...string) *PendingTasks { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -327,12 +324,28 @@ func (r *PendingTasks) MasterTimeout(duration string) *PendingTasks { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. 
-// API name: s -func (r *PendingTasks) S(names ...string) *PendingTasks { - r.values.Set("s", strings.Join(names, ",")) +// Time Unit used to display time values. +// API name: time +func (r *PendingTasks) Time(time timeunit.TimeUnit) *PendingTasks { + r.values.Set("time", time.String()) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *PendingTasks) Format(format string) *PendingTasks { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *PendingTasks) Help(help bool) *PendingTasks { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/pendingtasks/response.go b/typedapi/cat/pendingtasks/response.go index 43df84f36c..15b1fa23a6 100644 --- a/typedapi/cat/pendingtasks/response.go +++ b/typedapi/cat/pendingtasks/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package pendingtasks @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package pendingtasks // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/pending_tasks/CatPendingTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/pending_tasks/CatPendingTasksResponse.ts#L22-L24 type Response []types.PendingTasksRecord diff --git a/typedapi/cat/plugins/plugins.go b/typedapi/cat/plugins/plugins.go index 2f4ed2bfd8..396b481fcc 100644 --- a/typedapi/cat/plugins/plugins.go +++ b/typedapi/cat/plugins/plugins.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns a list of plugins running on each node of a cluster. +// Get plugin information. +// +// Get a list of plugins running on each node of a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the nodes info API. @@ -71,7 +73,9 @@ func NewPluginsFunc(tp elastictransport.Interface) NewPlugins { } } -// Returns a list of plugins running on each node of a cluster. +// Get plugin information. +// +// Get a list of plugins running on each node of a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. 
For // application consumption, use the nodes info API. @@ -282,15 +286,6 @@ func (r *Plugins) Header(key, value string) *Plugins { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Plugins) Format(format string) *Plugins { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Plugins) H(names ...string) *Plugins { @@ -299,11 +294,20 @@ func (r *Plugins) H(names ...string) *Plugins { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Plugins) Help(help bool) *Plugins { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Plugins) S(names ...string) *Plugins { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// IncludeBootstrap Include bootstrap plugins in the response +// API name: include_bootstrap +func (r *Plugins) IncludeBootstrap(includebootstrap bool) *Plugins { + r.values.Set("include_bootstrap", strconv.FormatBool(includebootstrap)) return r } @@ -327,12 +331,20 @@ func (r *Plugins) MasterTimeout(duration string) *Plugins { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Plugins) S(names ...string) *Plugins { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. 
+// API name: format +func (r *Plugins) Format(format string) *Plugins { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Plugins) Help(help bool) *Plugins { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/plugins/response.go b/typedapi/cat/plugins/response.go index 39892f9308..1b01fcb7f1 100644 --- a/typedapi/cat/plugins/response.go +++ b/typedapi/cat/plugins/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package plugins @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package plugins // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/plugins/CatPluginsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/plugins/CatPluginsResponse.ts#L22-L24 type Response []types.PluginsRecord diff --git a/typedapi/cat/recovery/recovery.go b/typedapi/cat/recovery/recovery.go index 0a9360e406..b28339388e 100644 --- a/typedapi/cat/recovery/recovery.go +++ b/typedapi/cat/recovery/recovery.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about ongoing and completed shard recoveries. +// Get shard recovery information. 
+// +// Get information about ongoing and completed shard recoveries. // Shard recovery is the process of initializing a shard copy, such as restoring // a primary shard from a snapshot or syncing a replica shard from a primary // shard. When a shard recovery completes, the recovered shard is available for @@ -44,6 +46,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) const ( @@ -84,7 +87,9 @@ func NewRecoveryFunc(tp elastictransport.Interface) NewRecovery { } } -// Returns information about ongoing and completed shard recoveries. +// Get shard recovery information. +// +// Get information about ongoing and completed shard recoveries. // Shard recovery is the process of initializing a shard copy, such as restoring // a primary shard from a snapshot or syncing a replica shard from a primary // shard. When a shard recovery completes, the recovered shard is available for @@ -350,15 +355,6 @@ func (r *Recovery) Detailed(detailed bool) *Recovery { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Recovery) Format(format string) *Recovery { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Recovery) H(names ...string) *Recovery { @@ -367,40 +363,38 @@ func (r *Recovery) H(names ...string) *Recovery { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Recovery) Help(help bool) *Recovery { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. 
+// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Recovery) S(names ...string) *Recovery { + r.values.Set("s", strings.Join(names, ",")) return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Recovery) Local(local bool) *Recovery { - r.values.Set("local", strconv.FormatBool(local)) +// Time Unit used to display time values. +// API name: time +func (r *Recovery) Time(time timeunit.TimeUnit) *Recovery { + r.values.Set("time", time.String()) return r } -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *Recovery) MasterTimeout(duration string) *Recovery { - r.values.Set("master_timeout", duration) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Recovery) Format(format string) *Recovery { + r.values.Set("format", format) return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Recovery) S(names ...string) *Recovery { - r.values.Set("s", strings.Join(names, ",")) +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. 
+// API name: help +func (r *Recovery) Help(help bool) *Recovery { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/recovery/response.go b/typedapi/cat/recovery/response.go index 9228581ee5..991dea2ea5 100644 --- a/typedapi/cat/recovery/response.go +++ b/typedapi/cat/recovery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package recovery @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package recovery // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/recovery/CatRecoveryResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/recovery/CatRecoveryResponse.ts#L22-L24 type Response []types.RecoveryRecord diff --git a/typedapi/cat/repositories/repositories.go b/typedapi/cat/repositories/repositories.go index 47d39aafc9..71a4afabbd 100644 --- a/typedapi/cat/repositories/repositories.go +++ b/typedapi/cat/repositories/repositories.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the snapshot repositories for a cluster. +// Get snapshot repository information. +// +// Get a list of snapshot repositories for a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. 
For // application consumption, use the get snapshot repository API. @@ -71,7 +73,9 @@ func NewRepositoriesFunc(tp elastictransport.Interface) NewRepositories { } } -// Returns the snapshot repositories for a cluster. +// Get snapshot repository information. +// +// Get a list of snapshot repositories for a cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the get snapshot repository API. @@ -282,15 +286,6 @@ func (r *Repositories) Header(key, value string) *Repositories { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Repositories) Format(format string) *Repositories { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Repositories) H(names ...string) *Repositories { @@ -299,11 +294,12 @@ func (r *Repositories) H(names ...string) *Repositories { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Repositories) Help(help bool) *Repositories { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Repositories) S(names ...string) *Repositories { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -327,12 +323,20 @@ func (r *Repositories) MasterTimeout(duration string) *Repositories { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. 
-// API name: s -func (r *Repositories) S(names ...string) *Repositories { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Repositories) Format(format string) *Repositories { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Repositories) Help(help bool) *Repositories { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/repositories/response.go b/typedapi/cat/repositories/response.go index 0bd84c85e3..11906ebd2f 100644 --- a/typedapi/cat/repositories/response.go +++ b/typedapi/cat/repositories/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package repositories @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package repositories // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/repositories/CatRepositoriesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/repositories/CatRepositoriesResponse.ts#L22-L24 type Response []types.RepositoriesRecord diff --git a/typedapi/cat/segments/response.go b/typedapi/cat/segments/response.go index cd9136f5a2..9ac567ab2c 100644 --- a/typedapi/cat/segments/response.go +++ b/typedapi/cat/segments/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package segments @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package segments // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/segments/CatSegmentsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/segments/CatSegmentsResponse.ts#L22-L24 type Response []types.SegmentsRecord diff --git a/typedapi/cat/segments/segments.go b/typedapi/cat/segments/segments.go index b200b94539..14e868234b 100644 --- a/typedapi/cat/segments/segments.go +++ b/typedapi/cat/segments/segments.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns low-level information about the Lucene segments in index shards. +// Get segment information. +// +// Get low-level information about the Lucene segments in index shards. // For data streams, the API returns information about the backing indices. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For @@ -79,7 +81,9 @@ func NewSegmentsFunc(tp elastictransport.Interface) NewSegments { } } -// Returns low-level information about the Lucene segments in index shards. +// Get segment information. +// +// Get low-level information about the Lucene segments in index shards. // For data streams, the API returns information about the backing indices. 
// IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For @@ -325,15 +329,6 @@ func (r *Segments) Bytes(bytes bytes.Bytes) *Segments { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Segments) Format(format string) *Segments { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Segments) H(names ...string) *Segments { @@ -342,11 +337,12 @@ func (r *Segments) H(names ...string) *Segments { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Segments) Help(help bool) *Segments { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Segments) S(names ...string) *Segments { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -370,12 +366,20 @@ func (r *Segments) MasterTimeout(duration string) *Segments { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Segments) S(names ...string) *Segments { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Segments) Format(format string) *Segments { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. 
This option +// can't be combined with any other query string option. +// API name: help +func (r *Segments) Help(help bool) *Segments { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/shards/response.go b/typedapi/cat/shards/response.go index 992498ec62..41f0374e07 100644 --- a/typedapi/cat/shards/response.go +++ b/typedapi/cat/shards/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package shards @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package shards // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/shards/CatShardsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/shards/CatShardsResponse.ts#L22-L24 type Response []types.ShardsRecord diff --git a/typedapi/cat/shards/shards.go b/typedapi/cat/shards/shards.go index cfea4a8eec..8946c4cf0a 100644 --- a/typedapi/cat/shards/shards.go +++ b/typedapi/cat/shards/shards.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about the shards in a cluster. +// Get shard information. +// +// Get information about the shards in a cluster. // For data streams, the API returns information about the backing indices. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. 
They are not intended for use by applications. @@ -38,6 +40,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) const ( @@ -78,7 +81,9 @@ func NewShardsFunc(tp elastictransport.Interface) NewShards { } } -// Returns information about the shards in a cluster. +// Get shard information. +// +// Get information about the shards in a cluster. // For data streams, the API returns information about the backing indices. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. @@ -323,15 +328,6 @@ func (r *Shards) Bytes(bytes bytes.Bytes) *Shards { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Shards) Format(format string) *Shards { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Shards) H(names ...string) *Shards { @@ -340,22 +336,12 @@ func (r *Shards) H(names ...string) *Shards { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Shards) Help(help bool) *Shards { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. 
-// API name: local -func (r *Shards) Local(local bool) *Shards { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Shards) S(names ...string) *Shards { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -368,12 +354,28 @@ func (r *Shards) MasterTimeout(duration string) *Shards { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Shards) S(names ...string) *Shards { - r.values.Set("s", strings.Join(names, ",")) +// Time Unit used to display time values. +// API name: time +func (r *Shards) Time(time timeunit.TimeUnit) *Shards { + r.values.Set("time", time.String()) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Shards) Format(format string) *Shards { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Shards) Help(help bool) *Shards { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/snapshots/response.go b/typedapi/cat/snapshots/response.go index 300574791a..28ecd09f4f 100644 --- a/typedapi/cat/snapshots/response.go +++ b/typedapi/cat/snapshots/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package snapshots @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package snapshots // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/snapshots/CatSnapshotsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/snapshots/CatSnapshotsResponse.ts#L22-L24 type Response []types.SnapshotsRecord diff --git a/typedapi/cat/snapshots/snapshots.go b/typedapi/cat/snapshots/snapshots.go index b08e087d27..b9d7282f08 100644 --- a/typedapi/cat/snapshots/snapshots.go +++ b/typedapi/cat/snapshots/snapshots.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about the snapshots stored in one or more repositories. +// Get snapshot information. +// +// Get information about the snapshots stored in one or more repositories. // A snapshot is a backup of an index or running Elasticsearch cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. 
For @@ -38,6 +40,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) const ( @@ -78,7 +81,9 @@ func NewSnapshotsFunc(tp elastictransport.Interface) NewSnapshots { } } -// Returns information about the snapshots stored in one or more repositories. +// Get snapshot information. +// +// Get information about the snapshots stored in one or more repositories. // A snapshot is a backup of an index or running Elasticsearch cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For @@ -324,15 +329,6 @@ func (r *Snapshots) IgnoreUnavailable(ignoreunavailable bool) *Snapshots { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Snapshots) Format(format string) *Snapshots { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Snapshots) H(names ...string) *Snapshots { @@ -341,22 +337,12 @@ func (r *Snapshots) H(names ...string) *Snapshots { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Snapshots) Help(help bool) *Snapshots { - r.values.Set("help", strconv.FormatBool(help)) - - return r -} - -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. 
-// API name: local -func (r *Snapshots) Local(local bool) *Snapshots { - r.values.Set("local", strconv.FormatBool(local)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Snapshots) S(names ...string) *Snapshots { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -369,12 +355,28 @@ func (r *Snapshots) MasterTimeout(duration string) *Snapshots { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Snapshots) S(names ...string) *Snapshots { - r.values.Set("s", strings.Join(names, ",")) +// Time Unit used to display time values. +// API name: time +func (r *Snapshots) Time(time timeunit.TimeUnit) *Snapshots { + r.values.Set("time", time.String()) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Snapshots) Format(format string) *Snapshots { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Snapshots) Help(help bool) *Snapshots { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/tasks/response.go b/typedapi/cat/tasks/response.go index 5ec14b1b91..0725f2a421 100644 --- a/typedapi/cat/tasks/response.go +++ b/typedapi/cat/tasks/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package tasks @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package tasks // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/tasks/CatTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/tasks/CatTasksResponse.ts#L22-L24 type Response []types.TasksRecord diff --git a/typedapi/cat/tasks/tasks.go b/typedapi/cat/tasks/tasks.go index 7562f5bc8e..1d2fdc8dc8 100644 --- a/typedapi/cat/tasks/tasks.go +++ b/typedapi/cat/tasks/tasks.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about tasks currently executing in the cluster. +// Get task information. +// +// Get information about tasks currently running in the cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the task management API. @@ -37,6 +39,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. 
@@ -71,12 +74,14 @@ func NewTasksFunc(tp elastictransport.Interface) NewTasks { } } -// Returns information about tasks currently executing in the cluster. +// Get task information. +// +// Get information about tasks currently running in the cluster. // IMPORTANT: cat APIs are only intended for human consumption using the command // line or Kibana console. They are not intended for use by applications. For // application consumption, use the task management API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-tasks.html func New(tp elastictransport.Interface) *Tasks { r := &Tasks{ transport: tp, @@ -302,14 +307,14 @@ func (r *Tasks) Detailed(detailed bool) *Tasks { return r } -// NodeId Unique node identifiers, which are used to limit the response. -// API name: node_id -func (r *Tasks) NodeId(nodeids ...string) *Tasks { +// Nodes Unique node identifiers, which are used to limit the response. +// API name: nodes +func (r *Tasks) Nodes(nodes ...string) *Tasks { tmp := []string{} - for _, item := range nodeids { + for _, item := range nodes { tmp = append(tmp, fmt.Sprintf("%v", item)) } - r.values.Set("node_id", strings.Join(tmp, ",")) + r.values.Set("nodes", strings.Join(tmp, ",")) return r } @@ -322,15 +327,6 @@ func (r *Tasks) ParentTaskId(parenttaskid string) *Tasks { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Tasks) Format(format string) *Tasks { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Tasks) H(names ...string) *Tasks { @@ -339,40 +335,56 @@ func (r *Tasks) H(names ...string) *Tasks { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. 
-// API name: help -func (r *Tasks) Help(help bool) *Tasks { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Tasks) S(names ...string) *Tasks { + r.values.Set("s", strings.Join(names, ",")) return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Tasks) Local(local bool) *Tasks { - r.values.Set("local", strconv.FormatBool(local)) +// Time Unit used to display time values. +// API name: time +func (r *Tasks) Time(time timeunit.TimeUnit) *Tasks { + r.values.Set("time", time.String()) return r } -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *Tasks) MasterTimeout(duration string) *Tasks { - r.values.Set("master_timeout", duration) +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *Tasks) Timeout(duration string) *Tasks { + r.values.Set("timeout", duration) return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Tasks) S(names ...string) *Tasks { - r.values.Set("s", strings.Join(names, ",")) +// WaitForCompletion If `true`, the request blocks until the task has completed. 
+// API name: wait_for_completion +func (r *Tasks) WaitForCompletion(waitforcompletion bool) *Tasks { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Tasks) Format(format string) *Tasks { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Tasks) Help(help bool) *Tasks { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/templates/response.go b/typedapi/cat/templates/response.go index b528206021..543ed93b68 100644 --- a/typedapi/cat/templates/response.go +++ b/typedapi/cat/templates/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package templates @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package templates // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/templates/CatTemplatesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/templates/CatTemplatesResponse.ts#L22-L24 type Response []types.TemplatesRecord diff --git a/typedapi/cat/templates/templates.go b/typedapi/cat/templates/templates.go index 5a34478e36..e016b2d2d1 100644 --- a/typedapi/cat/templates/templates.go +++ b/typedapi/cat/templates/templates.go @@ -16,9 +16,11 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about index templates in a cluster. +// Get index template information. +// +// Get information about the index templates in a cluster. // You can use index templates to apply index settings and field mappings to new // indices at creation. // IMPORTANT: cat APIs are only intended for human consumption using the command @@ -79,7 +81,9 @@ func NewTemplatesFunc(tp elastictransport.Interface) NewTemplates { } } -// Returns information about index templates in a cluster. +// Get index template information. +// +// Get information about the index templates in a cluster. // You can use index templates to apply index settings and field mappings to new // indices at creation. // IMPORTANT: cat APIs are only intended for human consumption using the command @@ -315,15 +319,6 @@ func (r *Templates) Name(name string) *Templates { return r } -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *Templates) Format(format string) *Templates { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *Templates) H(names ...string) *Templates { @@ -332,11 +327,12 @@ func (r *Templates) H(names ...string) *Templates { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *Templates) Help(help bool) *Templates { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. 
+// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Templates) S(names ...string) *Templates { + r.values.Set("s", strings.Join(names, ",")) return r } @@ -360,12 +356,20 @@ func (r *Templates) MasterTimeout(duration string) *Templates { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. -// API name: s -func (r *Templates) S(names ...string) *Templates { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Templates) Format(format string) *Templates { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Templates) Help(help bool) *Templates { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/threadpool/response.go b/typedapi/cat/threadpool/response.go index bae4eb350b..ccee341a58 100644 --- a/typedapi/cat/threadpool/response.go +++ b/typedapi/cat/threadpool/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package threadpool @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package threadpool // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/thread_pool/CatThreadPoolResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/thread_pool/CatThreadPoolResponse.ts#L22-L24 type Response []types.ThreadPoolRecord diff --git a/typedapi/cat/threadpool/thread_pool.go b/typedapi/cat/threadpool/thread_pool.go index 2801a15087..eb90171344 100644 --- a/typedapi/cat/threadpool/thread_pool.go +++ b/typedapi/cat/threadpool/thread_pool.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns thread pool statistics for each node in a cluster. +// Get thread pool statistics. +// +// Get thread pool statistics for each node in a cluster. // Returned information includes all built-in thread pools and custom thread // pools. // IMPORTANT: cat APIs are only intended for human consumption using the command @@ -80,7 +82,9 @@ func NewThreadPoolFunc(tp elastictransport.Interface) NewThreadPool { } } -// Returns thread pool statistics for each node in a cluster. +// Get thread pool statistics. +// +// Get thread pool statistics for each node in a cluster. // Returned information includes all built-in thread pools and custom thread // pools. 
// IMPORTANT: cat APIs are only intended for human consumption using the command @@ -316,23 +320,6 @@ func (r *ThreadPool) ThreadPoolPatterns(threadpoolpatterns string) *ThreadPool { return r } -// Time The unit used to display time values. -// API name: time -func (r *ThreadPool) Time(time timeunit.TimeUnit) *ThreadPool { - r.values.Set("time", time.String()) - - return r -} - -// Format Specifies the format to return the columnar data in, can be set to -// `text`, `json`, `cbor`, `yaml`, or `smile`. -// API name: format -func (r *ThreadPool) Format(format string) *ThreadPool { - r.values.Set("format", format) - - return r -} - // H List of columns to appear in the response. Supports simple wildcards. // API name: h func (r *ThreadPool) H(names ...string) *ThreadPool { @@ -341,11 +328,20 @@ func (r *ThreadPool) H(names ...string) *ThreadPool { return r } -// Help When set to `true` will output available columns. This option -// can't be combined with any other query string option. -// API name: help -func (r *ThreadPool) Help(help bool) *ThreadPool { - r.values.Set("help", strconv.FormatBool(help)) +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *ThreadPool) S(names ...string) *ThreadPool { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Time The unit used to display time values. +// API name: time +func (r *ThreadPool) Time(time timeunit.TimeUnit) *ThreadPool { + r.values.Set("time", time.String()) return r } @@ -369,12 +365,20 @@ func (r *ThreadPool) MasterTimeout(duration string) *ThreadPool { return r } -// S List of columns that determine how the table should be sorted. -// Sorting defaults to ascending and can be changed by setting `:asc` -// or `:desc` as a suffix to the column name. 
-// API name: s -func (r *ThreadPool) S(names ...string) *ThreadPool { - r.values.Set("s", strings.Join(names, ",")) +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *ThreadPool) Format(format string) *ThreadPool { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *ThreadPool) Help(help bool) *ThreadPool { + r.values.Set("help", strconv.FormatBool(help)) return r } diff --git a/typedapi/cat/transforms/response.go b/typedapi/cat/transforms/response.go index 745ac79af3..d62a69e662 100644 --- a/typedapi/cat/transforms/response.go +++ b/typedapi/cat/transforms/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package transforms @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package transforms // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/transforms/CatTransformsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/transforms/CatTransformsResponse.ts#L22-L24 type Response []types.TransformsRecord diff --git a/typedapi/cat/transforms/transforms.go b/typedapi/cat/transforms/transforms.go index 7c8e9b01b2..b8c2485dd8 100644 --- a/typedapi/cat/transforms/transforms.go +++ b/typedapi/cat/transforms/transforms.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Get transforms. -// Returns configuration and usage information about transforms. +// Get transform information. +// +// Get configuration and usage information about transforms. // // CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For @@ -81,8 +82,9 @@ func NewTransformsFunc(tp elastictransport.Interface) NewTransforms { } } -// Get transforms. -// Returns configuration and usage information about transforms. +// Get transform information. +// +// Get configuration and usage information about transforms. // // CAT APIs are only intended for human consumption using the Kibana // console or command line. They are not intended for use by applications. For @@ -400,25 +402,6 @@ func (r *Transforms) Help(help bool) *Transforms { return r } -// Local If `true`, the request computes the list of selected nodes from the -// local cluster state. If `false` the list of selected nodes are computed -// from the cluster state of the master node. In both cases the coordinating -// node will send requests for further information to each selected node. -// API name: local -func (r *Transforms) Local(local bool) *Transforms { - r.values.Set("local", strconv.FormatBool(local)) - - return r -} - -// MasterTimeout Period to wait for a connection to the master node. -// API name: master_timeout -func (r *Transforms) MasterTimeout(duration string) *Transforms { - r.values.Set("master_timeout", duration) - - return r -} - // V When set to `true` will enable verbose output. 
// API name: v func (r *Transforms) V(v bool) *Transforms { diff --git a/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go b/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go index 5f6cf78822..02de7fefb1 100644 --- a/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go +++ b/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes auto-follow patterns. +// Delete auto-follow patterns. +// +// Delete a collection of cross-cluster replication auto-follow patterns. package deleteautofollowpattern import ( @@ -76,7 +78,9 @@ func NewDeleteAutoFollowPatternFunc(tp elastictransport.Interface) NewDeleteAuto } } -// Deletes auto-follow patterns. +// Delete auto-follow patterns. +// +// Delete a collection of cross-cluster replication auto-follow patterns. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-delete-auto-follow-pattern.html func New(tp elastictransport.Interface) *DeleteAutoFollowPattern { @@ -290,7 +294,7 @@ func (r *DeleteAutoFollowPattern) Header(key, value string) *DeleteAutoFollowPat return r } -// Name The name of the auto follow pattern. +// Name The auto-follow pattern collection to delete. // API Name: name func (r *DeleteAutoFollowPattern) _name(name string) *DeleteAutoFollowPattern { r.paramSet |= nameMask @@ -299,6 +303,17 @@ func (r *DeleteAutoFollowPattern) _name(name string) *DeleteAutoFollowPattern { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. 
+// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *DeleteAutoFollowPattern) MasterTimeout(duration string) *DeleteAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/deleteautofollowpattern/response.go b/typedapi/ccr/deleteautofollowpattern/response.go index e36b0887e0..5af486466d 100644 --- a/typedapi/ccr/deleteautofollowpattern/response.go +++ b/typedapi/ccr/deleteautofollowpattern/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleteautofollowpattern // Response holds the response body struct for the package deleteautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/delete_auto_follow_pattern/DeleteAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/delete_auto_follow_pattern/DeleteAutoFollowPatternResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ccr/follow/follow.go b/typedapi/ccr/follow/follow.go index 61e75acf3f..111646a749 100644 --- a/typedapi/ccr/follow/follow.go +++ b/typedapi/ccr/follow/follow.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a new follower index configured to follow the referenced leader -// index. +// Create a follower. +// Create a cross-cluster replication follower index that follows a specific +// leader index. +// When the API returns, the follower index exists and cross-cluster replication +// starts replicating operations from the leader index to the follower index. package follow import ( @@ -82,8 +85,11 @@ func NewFollowFunc(tp elastictransport.Interface) NewFollow { } } -// Creates a new follower index configured to follow the referenced leader -// index. +// Create a follower. +// Create a cross-cluster replication follower index that follows a specific +// leader index. +// When the API returns, the follower index exists and cross-cluster replication +// starts replicating operations from the leader index to the follower index. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-follow.html func New(tp elastictransport.Interface) *Follow { @@ -93,8 +99,6 @@ func New(tp elastictransport.Interface) *Follow { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -308,7 +312,7 @@ func (r *Follow) Header(key, value string) *Follow { return r } -// Index The name of the follower index +// Index The name of the follower index. // API Name: index func (r *Follow) _index(index string) *Follow { r.paramSet |= indexMask @@ -317,10 +321,20 @@ func (r *Follow) _index(index string) *Follow { return r } -// WaitForActiveShards Sets the number of shard copies that must be active before returning. -// Defaults to 0. 
Set to `all` for all shard copies, otherwise set to any -// non-negative value less than or equal to the total number of copies for the -// shard (number of replicas + 1) +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Follow) MasterTimeout(duration string) *Follow { + r.values.Set("master_timeout", duration) + + return r +} + +// WaitForActiveShards Specifies the number of shards to wait on being active before responding. +// This defaults to waiting on none of the shards to be +// active. +// A shard must be restored from the leader index before being active. Restoring +// a follower shard requires transferring all the +// remote Lucene segment files to the follower index. // API name: wait_for_active_shards func (r *Follow) WaitForActiveShards(waitforactiveshards string) *Follow { r.values.Set("wait_for_active_shards", waitforactiveshards) @@ -372,95 +386,198 @@ func (r *Follow) Pretty(pretty bool) *Follow { return r } +// If the leader index is part of a data stream, the name to which the local +// data stream for the followed index should be renamed. +// API name: data_stream_name +func (r *Follow) DataStreamName(datastreamname string) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DataStreamName = &datastreamname + + return r +} + +// The name of the index in the leader cluster to follow. // API name: leader_index func (r *Follow) LeaderIndex(indexname string) *Follow { - r.req.LeaderIndex = &indexname + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LeaderIndex = indexname return r } +// The maximum number of outstanding reads requests from the remote cluster. 
// API name: max_outstanding_read_requests func (r *Follow) MaxOutstandingReadRequests(maxoutstandingreadrequests int64) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxOutstandingReadRequests = &maxoutstandingreadrequests return r } +// The maximum number of outstanding write requests on the follower. // API name: max_outstanding_write_requests -func (r *Follow) MaxOutstandingWriteRequests(maxoutstandingwriterequests int64) *Follow { +func (r *Follow) MaxOutstandingWriteRequests(maxoutstandingwriterequests int) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxOutstandingWriteRequests = &maxoutstandingwriterequests return r } +// The maximum number of operations to pull per read from the remote cluster. // API name: max_read_request_operation_count -func (r *Follow) MaxReadRequestOperationCount(maxreadrequestoperationcount int64) *Follow { +func (r *Follow) MaxReadRequestOperationCount(maxreadrequestoperationcount int) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxReadRequestOperationCount = &maxreadrequestoperationcount return r } +// The maximum size in bytes of per read of a batch of operations pulled from +// the remote cluster. // API name: max_read_request_size -func (r *Follow) MaxReadRequestSize(maxreadrequestsize string) *Follow { +func (r *Follow) MaxReadRequestSize(bytesize types.ByteSizeVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.MaxReadRequestSize = &maxreadrequestsize + r.req.MaxReadRequestSize = *bytesize.ByteSizeCaster() return r } +// The maximum time to wait before retrying an operation that failed +// exceptionally. An exponential backoff strategy is employed when +// retrying. 
// API name: max_retry_delay -func (r *Follow) MaxRetryDelay(duration types.Duration) *Follow { - r.req.MaxRetryDelay = duration +func (r *Follow) MaxRetryDelay(duration types.DurationVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxRetryDelay = *duration.DurationCaster() return r } +// The maximum number of operations that can be queued for writing. When this +// limit is reached, reads from the remote cluster will be +// deferred until the number of queued operations goes below the limit. // API name: max_write_buffer_count -func (r *Follow) MaxWriteBufferCount(maxwritebuffercount int64) *Follow { +func (r *Follow) MaxWriteBufferCount(maxwritebuffercount int) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxWriteBufferCount = &maxwritebuffercount return r } +// The maximum total bytes of operations that can be queued for writing. When +// this limit is reached, reads from the remote cluster will +// be deferred until the total bytes of queued operations goes below the limit. // API name: max_write_buffer_size -func (r *Follow) MaxWriteBufferSize(maxwritebuffersize string) *Follow { +func (r *Follow) MaxWriteBufferSize(bytesize types.ByteSizeVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.MaxWriteBufferSize = &maxwritebuffersize + r.req.MaxWriteBufferSize = *bytesize.ByteSizeCaster() return r } +// The maximum number of operations per bulk write request executed on the +// follower. 
// API name: max_write_request_operation_count -func (r *Follow) MaxWriteRequestOperationCount(maxwriterequestoperationcount int64) *Follow { +func (r *Follow) MaxWriteRequestOperationCount(maxwriterequestoperationcount int) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxWriteRequestOperationCount = &maxwriterequestoperationcount return r } +// The maximum total bytes of operations per bulk write request executed on the +// follower. // API name: max_write_request_size -func (r *Follow) MaxWriteRequestSize(maxwriterequestsize string) *Follow { +func (r *Follow) MaxWriteRequestSize(bytesize types.ByteSizeVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.MaxWriteRequestSize = &maxwriterequestsize + r.req.MaxWriteRequestSize = *bytesize.ByteSizeCaster() return r } +// The maximum time to wait for new operations on the remote cluster when the +// follower index is synchronized with the leader index. +// When the timeout has elapsed, the poll for operations will return to the +// follower so that it can update some statistics. +// Then the follower will immediately attempt to read from the leader again. // API name: read_poll_timeout -func (r *Follow) ReadPollTimeout(duration types.Duration) *Follow { - r.req.ReadPollTimeout = duration +func (r *Follow) ReadPollTimeout(duration types.DurationVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ReadPollTimeout = *duration.DurationCaster() return r } +// The remote cluster containing the leader index. 
// API name: remote_cluster func (r *Follow) RemoteCluster(remotecluster string) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RemoteCluster = remotecluster + + return r +} + +// Settings to override from the leader index. +// API name: settings +func (r *Follow) Settings(settings types.IndexSettingsVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.RemoteCluster = &remotecluster + r.req.Settings = settings.IndexSettingsCaster() return r } diff --git a/typedapi/ccr/follow/request.go b/typedapi/ccr/follow/request.go index 85f7321f8b..0ec31dab30 100644 --- a/typedapi/ccr/follow/request.go +++ b/typedapi/ccr/follow/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package follow @@ -33,20 +33,51 @@ import ( // Request holds the request body struct for the package follow // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/follow/CreateFollowIndexRequest.ts#L25-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/follow/CreateFollowIndexRequest.ts#L26-L124 type Request struct { - LeaderIndex *string `json:"leader_index,omitempty"` - MaxOutstandingReadRequests *int64 `json:"max_outstanding_read_requests,omitempty"` - MaxOutstandingWriteRequests *int64 `json:"max_outstanding_write_requests,omitempty"` - MaxReadRequestOperationCount *int64 `json:"max_read_request_operation_count,omitempty"` - MaxReadRequestSize *string `json:"max_read_request_size,omitempty"` - MaxRetryDelay types.Duration 
`json:"max_retry_delay,omitempty"` - MaxWriteBufferCount *int64 `json:"max_write_buffer_count,omitempty"` - MaxWriteBufferSize *string `json:"max_write_buffer_size,omitempty"` - MaxWriteRequestOperationCount *int64 `json:"max_write_request_operation_count,omitempty"` - MaxWriteRequestSize *string `json:"max_write_request_size,omitempty"` - ReadPollTimeout types.Duration `json:"read_poll_timeout,omitempty"` - RemoteCluster *string `json:"remote_cluster,omitempty"` + + // DataStreamName If the leader index is part of a data stream, the name to which the local + // data stream for the followed index should be renamed. + DataStreamName *string `json:"data_stream_name,omitempty"` + // LeaderIndex The name of the index in the leader cluster to follow. + LeaderIndex string `json:"leader_index"` + // MaxOutstandingReadRequests The maximum number of outstanding reads requests from the remote cluster. + MaxOutstandingReadRequests *int64 `json:"max_outstanding_read_requests,omitempty"` + // MaxOutstandingWriteRequests The maximum number of outstanding write requests on the follower. + MaxOutstandingWriteRequests *int `json:"max_outstanding_write_requests,omitempty"` + // MaxReadRequestOperationCount The maximum number of operations to pull per read from the remote cluster. + MaxReadRequestOperationCount *int `json:"max_read_request_operation_count,omitempty"` + // MaxReadRequestSize The maximum size in bytes of per read of a batch of operations pulled from + // the remote cluster. + MaxReadRequestSize types.ByteSize `json:"max_read_request_size,omitempty"` + // MaxRetryDelay The maximum time to wait before retrying an operation that failed + // exceptionally. An exponential backoff strategy is employed when + // retrying. + MaxRetryDelay types.Duration `json:"max_retry_delay,omitempty"` + // MaxWriteBufferCount The maximum number of operations that can be queued for writing. 
When this + // limit is reached, reads from the remote cluster will be + // deferred until the number of queued operations goes below the limit. + MaxWriteBufferCount *int `json:"max_write_buffer_count,omitempty"` + // MaxWriteBufferSize The maximum total bytes of operations that can be queued for writing. When + // this limit is reached, reads from the remote cluster will + // be deferred until the total bytes of queued operations goes below the limit. + MaxWriteBufferSize types.ByteSize `json:"max_write_buffer_size,omitempty"` + // MaxWriteRequestOperationCount The maximum number of operations per bulk write request executed on the + // follower. + MaxWriteRequestOperationCount *int `json:"max_write_request_operation_count,omitempty"` + // MaxWriteRequestSize The maximum total bytes of operations per bulk write request executed on the + // follower. + MaxWriteRequestSize types.ByteSize `json:"max_write_request_size,omitempty"` + // ReadPollTimeout The maximum time to wait for new operations on the remote cluster when the + // follower index is synchronized with the leader index. + // When the timeout has elapsed, the poll for operations will return to the + // follower so that it can update some statistics. + // Then the follower will immediately attempt to read from the leader again. + ReadPollTimeout types.Duration `json:"read_poll_timeout,omitempty"` + // RemoteCluster The remote cluster containing the leader index. + RemoteCluster string `json:"remote_cluster"` + // Settings Settings to override from the leader index. 
+ Settings *types.IndexSettings `json:"settings,omitempty"` } // NewRequest returns a Request @@ -82,6 +113,18 @@ func (s *Request) UnmarshalJSON(data []byte) error { switch t { + case "data_stream_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataStreamName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataStreamName = &o + case "leader_index": if err := dec.Decode(&s.LeaderIndex); err != nil { return fmt.Errorf("%s | %w", "LeaderIndex", err) @@ -103,46 +146,41 @@ func (s *Request) UnmarshalJSON(data []byte) error { } case "max_outstanding_write_requests": + var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: - value, err := strconv.ParseInt(v, 10, 64) + value, err := strconv.Atoi(v) if err != nil { return fmt.Errorf("%s | %w", "MaxOutstandingWriteRequests", err) } s.MaxOutstandingWriteRequests = &value case float64: - f := int64(v) + f := int(v) s.MaxOutstandingWriteRequests = &f } case "max_read_request_operation_count": + var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: - value, err := strconv.ParseInt(v, 10, 64) + value, err := strconv.Atoi(v) if err != nil { return fmt.Errorf("%s | %w", "MaxReadRequestOperationCount", err) } s.MaxReadRequestOperationCount = &value case float64: - f := int64(v) + f := int(v) s.MaxReadRequestOperationCount = &f } case "max_read_request_size": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.MaxReadRequestSize); err != nil { return fmt.Errorf("%s | %w", "MaxReadRequestSize", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.MaxReadRequestSize = &o case "max_retry_delay": if err := dec.Decode(&s.MaxRetryDelay); err != nil { @@ -150,58 +188,46 @@ func (s *Request) UnmarshalJSON(data []byte) error { } case "max_write_buffer_count": + var tmp any dec.Decode(&tmp) switch v := 
tmp.(type) { case string: - value, err := strconv.ParseInt(v, 10, 64) + value, err := strconv.Atoi(v) if err != nil { return fmt.Errorf("%s | %w", "MaxWriteBufferCount", err) } s.MaxWriteBufferCount = &value case float64: - f := int64(v) + f := int(v) s.MaxWriteBufferCount = &f } case "max_write_buffer_size": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.MaxWriteBufferSize); err != nil { return fmt.Errorf("%s | %w", "MaxWriteBufferSize", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.MaxWriteBufferSize = &o case "max_write_request_operation_count": + var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: - value, err := strconv.ParseInt(v, 10, 64) + value, err := strconv.Atoi(v) if err != nil { return fmt.Errorf("%s | %w", "MaxWriteRequestOperationCount", err) } s.MaxWriteRequestOperationCount = &value case float64: - f := int64(v) + f := int(v) s.MaxWriteRequestOperationCount = &f } case "max_write_request_size": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.MaxWriteRequestSize); err != nil { return fmt.Errorf("%s | %w", "MaxWriteRequestSize", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.MaxWriteRequestSize = &o case "read_poll_timeout": if err := dec.Decode(&s.ReadPollTimeout); err != nil { @@ -218,7 +244,12 @@ func (s *Request) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.RemoteCluster = &o + s.RemoteCluster = o + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } } } diff --git a/typedapi/ccr/follow/response.go b/typedapi/ccr/follow/response.go index 224aaa8be3..3f686d6e32 100644 --- a/typedapi/ccr/follow/response.go +++ b/typedapi/ccr/follow/response.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package follow // Response holds the response body struct for the package follow // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/follow/CreateFollowIndexResponse.ts#L20-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/follow/CreateFollowIndexResponse.ts#L20-L26 type Response struct { FollowIndexCreated bool `json:"follow_index_created"` FollowIndexShardsAcked bool `json:"follow_index_shards_acked"` diff --git a/typedapi/ccr/followinfo/follow_info.go b/typedapi/ccr/followinfo/follow_info.go index 01d302f872..6738409b12 100644 --- a/typedapi/ccr/followinfo/follow_info.go +++ b/typedapi/ccr/followinfo/follow_info.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves information about all follower indices, including parameters and -// status for each follower index +// Get follower information. +// +// Get information about all cross-cluster replication follower indices. +// For example, the results include follower index names, leader index names, +// replication options, and whether the follower indices are active or paused. 
package followinfo import ( @@ -77,8 +80,11 @@ func NewFollowInfoFunc(tp elastictransport.Interface) NewFollowInfo { } } -// Retrieves information about all follower indices, including parameters and -// status for each follower index +// Get follower information. +// +// Get information about all cross-cluster replication follower indices. +// For example, the results include follower index names, leader index names, +// replication options, and whether the follower indices are active or paused. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-info.html func New(tp elastictransport.Interface) *FollowInfo { @@ -292,8 +298,7 @@ func (r *FollowInfo) Header(key, value string) *FollowInfo { return r } -// Index A comma-separated list of index patterns; use `_all` to perform the operation -// on all indices +// Index A comma-delimited list of follower index patterns. // API Name: index func (r *FollowInfo) _index(index string) *FollowInfo { r.paramSet |= indexMask @@ -302,6 +307,17 @@ func (r *FollowInfo) _index(index string) *FollowInfo { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *FollowInfo) MasterTimeout(duration string) *FollowInfo { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/followinfo/response.go b/typedapi/ccr/followinfo/response.go index 5a5e9721e4..933e7603d8 100644 --- a/typedapi/ccr/followinfo/response.go +++ b/typedapi/ccr/followinfo/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package followinfo @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package followinfo // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/follow_info/FollowInfoResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/follow_info/FollowInfoResponse.ts#L22-L24 type Response struct { FollowerIndices []types.FollowerIndex `json:"follower_indices"` } diff --git a/typedapi/ccr/followstats/follow_stats.go b/typedapi/ccr/followstats/follow_stats.go index c7b2ad94c2..59b1ef933f 100644 --- a/typedapi/ccr/followstats/follow_stats.go +++ b/typedapi/ccr/followstats/follow_stats.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves follower stats. return shard-level stats about the following tasks -// associated with each shard for the specified indices. +// Get follower stats. +// +// Get cross-cluster replication follower stats. +// The API returns shard-level stats about the "following tasks" associated with +// each shard for the specified indices. package followstats import ( @@ -77,8 +80,11 @@ func NewFollowStatsFunc(tp elastictransport.Interface) NewFollowStats { } } -// Retrieves follower stats. return shard-level stats about the following tasks -// associated with each shard for the specified indices. +// Get follower stats. +// +// Get cross-cluster replication follower stats. 
+// The API returns shard-level stats about the "following tasks" associated with +// each shard for the specified indices. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-stats.html func New(tp elastictransport.Interface) *FollowStats { @@ -292,8 +298,7 @@ func (r *FollowStats) Header(key, value string) *FollowStats { return r } -// Index A comma-separated list of index patterns; use `_all` to perform the operation -// on all indices +// Index A comma-delimited list of index patterns. // API Name: index func (r *FollowStats) _index(index string) *FollowStats { r.paramSet |= indexMask @@ -302,6 +307,16 @@ func (r *FollowStats) _index(index string) *FollowStats { return r } +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *FollowStats) Timeout(duration string) *FollowStats { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/followstats/response.go b/typedapi/ccr/followstats/response.go index 0f68a1df21..7ec7c0eff1 100644 --- a/typedapi/ccr/followstats/response.go +++ b/typedapi/ccr/followstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package followstats @@ -26,8 +26,10 @@ import ( // Response holds the response body struct for the package followstats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/follow_stats/FollowIndexStatsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/follow_stats/FollowIndexStatsResponse.ts#L22-L27 type Response struct { + + // Indices An array of follower index statistics. Indices []types.FollowIndexStats `json:"indices"` } diff --git a/typedapi/ccr/forgetfollower/forget_follower.go b/typedapi/ccr/forgetfollower/forget_follower.go index 3b139bc2a1..d3a49596b6 100644 --- a/typedapi/ccr/forgetfollower/forget_follower.go +++ b/typedapi/ccr/forgetfollower/forget_follower.go @@ -16,9 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Removes the follower retention leases from the leader. +// Forget a follower. +// Remove the cross-cluster replication follower retention leases from the +// leader. +// +// A following index takes out retention leases on its leader index. +// These leases are used to increase the likelihood that the shards of the +// leader index retain the history of operations that the shards of the +// following index need to run replication. 
+// When a follower index is converted to a regular index by the unfollow API +// (either by directly calling the API or by index lifecycle management tasks), +// these leases are removed. +// However, removal of the leases can fail, for example when the remote cluster +// containing the leader index is unavailable. +// While the leases will eventually expire on their own, their extended +// existence can cause the leader index to hold more history than necessary and +// prevent index lifecycle management from performing some operations on the +// leader index. +// This API exists to enable manually removing the leases when the unfollow API +// is unable to do so. +// +// NOTE: This API does not stop replication by a following index. If you use +// this API with a follower index that is still actively following, the +// following index will add back retention leases on the leader. +// The only purpose of this API is to handle the case of failure to remove the +// following retention leases after the unfollow API is invoked. package forgetfollower import ( @@ -81,7 +105,31 @@ func NewForgetFollowerFunc(tp elastictransport.Interface) NewForgetFollower { } } -// Removes the follower retention leases from the leader. +// Forget a follower. +// Remove the cross-cluster replication follower retention leases from the +// leader. +// +// A following index takes out retention leases on its leader index. +// These leases are used to increase the likelihood that the shards of the +// leader index retain the history of operations that the shards of the +// following index need to run replication. +// When a follower index is converted to a regular index by the unfollow API +// (either by directly calling the API or by index lifecycle management tasks), +// these leases are removed. +// However, removal of the leases can fail, for example when the remote cluster +// containing the leader index is unavailable. 
+// While the leases will eventually expire on their own, their extended +// existence can cause the leader index to hold more history than necessary and +// prevent index lifecycle management from performing some operations on the +// leader index. +// This API exists to enable manually removing the leases when the unfollow API +// is unable to do so. +// +// NOTE: This API does not stop replication by a following index. If you use +// this API with a follower index that is still actively following, the +// following index will add back retention leases on the leader. +// The only purpose of this API is to handle the case of failure to remove the +// following retention leases after the unfollow API is invoked. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-forget-follower.html func New(tp elastictransport.Interface) *ForgetFollower { @@ -91,8 +139,6 @@ func New(tp elastictransport.Interface) *ForgetFollower { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -316,6 +362,15 @@ func (r *ForgetFollower) _index(index string) *ForgetFollower { return r } +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *ForgetFollower) Timeout(duration string) *ForgetFollower { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace @@ -362,6 +417,10 @@ func (r *ForgetFollower) Pretty(pretty bool) *ForgetFollower { // API name: follower_cluster func (r *ForgetFollower) FollowerCluster(followercluster string) *ForgetFollower { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.FollowerCluster = &followercluster @@ -370,6 +429,11 @@ func (r *ForgetFollower) FollowerCluster(followercluster string) *ForgetFollower // API name: follower_index func (r *ForgetFollower) FollowerIndex(indexname string) *ForgetFollower { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FollowerIndex = &indexname return r @@ -377,6 +441,11 @@ func (r *ForgetFollower) FollowerIndex(indexname string) *ForgetFollower { // API name: follower_index_uuid func (r *ForgetFollower) FollowerIndexUuid(uuid string) *ForgetFollower { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FollowerIndexUuid = &uuid return r @@ -384,6 +453,10 @@ func (r *ForgetFollower) FollowerIndexUuid(uuid string) *ForgetFollower { // API name: leader_remote_cluster func (r *ForgetFollower) LeaderRemoteCluster(leaderremotecluster string) *ForgetFollower { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LeaderRemoteCluster = &leaderremotecluster diff --git a/typedapi/ccr/forgetfollower/request.go b/typedapi/ccr/forgetfollower/request.go index 5539d78e1f..bfde57725e 100644 --- a/typedapi/ccr/forgetfollower/request.go +++ b/typedapi/ccr/forgetfollower/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package forgetfollower @@ -31,7 +31,7 @@ import ( // Request holds the request body struct for the package forgetfollower // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/forget_follower/ForgetFollowerIndexRequest.ts#L23-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/forget_follower/ForgetFollowerIndexRequest.ts#L24-L65 type Request struct { FollowerCluster *string `json:"follower_cluster,omitempty"` FollowerIndex *string `json:"follower_index,omitempty"` diff --git a/typedapi/ccr/forgetfollower/response.go b/typedapi/ccr/forgetfollower/response.go index 3b95a47c1c..d98719d6ac 100644 --- a/typedapi/ccr/forgetfollower/response.go +++ b/typedapi/ccr/forgetfollower/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package forgetfollower @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package forgetfollower // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/forget_follower/ForgetFollowerIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/forget_follower/ForgetFollowerIndexResponse.ts#L22-L24 type Response struct { Shards_ types.ShardStatistics `json:"_shards"` } diff --git a/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go b/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go index dfa4c2b555..3d4cc251c5 100644 --- a/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go +++ b/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Gets configured auto-follow patterns. Returns the specified auto-follow -// pattern collection. +// Get auto-follow patterns. +// +// Get cross-cluster replication auto-follow patterns. package getautofollowpattern import ( @@ -75,8 +76,9 @@ func NewGetAutoFollowPatternFunc(tp elastictransport.Interface) NewGetAutoFollow } } -// Gets configured auto-follow patterns. Returns the specified auto-follow -// pattern collection. +// Get auto-follow patterns. +// +// Get cross-cluster replication auto-follow patterns. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-auto-follow-pattern.html func New(tp elastictransport.Interface) *GetAutoFollowPattern { @@ -297,8 +299,9 @@ func (r *GetAutoFollowPattern) Header(key, value string) *GetAutoFollowPattern { return r } -// Name Specifies the auto-follow pattern collection that you want to retrieve. If -// you do not specify a name, the API returns information for all collections. +// Name The auto-follow pattern collection that you want to retrieve. +// If you do not specify a name, the API returns information for all +// collections. // API Name: name func (r *GetAutoFollowPattern) Name(name string) *GetAutoFollowPattern { r.paramSet |= nameMask @@ -307,6 +310,17 @@ func (r *GetAutoFollowPattern) Name(name string) *GetAutoFollowPattern { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *GetAutoFollowPattern) MasterTimeout(duration string) *GetAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/getautofollowpattern/response.go b/typedapi/ccr/getautofollowpattern/response.go index a9f2b0fc59..7e3136457b 100644 --- a/typedapi/ccr/getautofollowpattern/response.go +++ b/typedapi/ccr/getautofollowpattern/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getautofollowpattern @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/get_auto_follow_pattern/GetAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/get_auto_follow_pattern/GetAutoFollowPatternResponse.ts#L22-L24 type Response struct { Patterns []types.AutoFollowPattern `json:"patterns"` } diff --git a/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go b/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go index 9ae061f626..345f154ab0 100644 --- a/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go +++ b/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go @@ -16,9 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Pauses an auto-follow pattern +// Pause an auto-follow pattern. +// +// Pause a cross-cluster replication auto-follow pattern. +// When the API returns, the auto-follow pattern is inactive. +// New indices that are created on the remote cluster and match the auto-follow +// patterns are ignored. +// +// You can resume auto-following with the resume auto-follow pattern API. 
+// When it resumes, the auto-follow pattern is active again and automatically +// configures follower indices for newly created indices on the remote cluster +// that match its patterns. +// Remote indices that were created while the pattern was paused will also be +// followed, unless they have been deleted or closed in the interim. package pauseautofollowpattern import ( @@ -76,7 +88,19 @@ func NewPauseAutoFollowPatternFunc(tp elastictransport.Interface) NewPauseAutoFo } } -// Pauses an auto-follow pattern +// Pause an auto-follow pattern. +// +// Pause a cross-cluster replication auto-follow pattern. +// When the API returns, the auto-follow pattern is inactive. +// New indices that are created on the remote cluster and match the auto-follow +// patterns are ignored. +// +// You can resume auto-following with the resume auto-follow pattern API. +// When it resumes, the auto-follow pattern is active again and automatically +// configures follower indices for newly created indices on the remote cluster +// that match its patterns. +// Remote indices that were created while the pattern was paused will also be +// followed, unless they have been deleted or closed in the interim. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-pause-auto-follow-pattern.html func New(tp elastictransport.Interface) *PauseAutoFollowPattern { @@ -292,8 +316,7 @@ func (r *PauseAutoFollowPattern) Header(key, value string) *PauseAutoFollowPatte return r } -// Name The name of the auto follow pattern that should pause discovering new indices -// to follow. +// Name The name of the auto-follow pattern to pause. // API Name: name func (r *PauseAutoFollowPattern) _name(name string) *PauseAutoFollowPattern { r.paramSet |= nameMask @@ -302,6 +325,17 @@ func (r *PauseAutoFollowPattern) _name(name string) *PauseAutoFollowPattern { return r } +// MasterTimeout The period to wait for a connection to the master node. 
+// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *PauseAutoFollowPattern) MasterTimeout(duration string) *PauseAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/pauseautofollowpattern/response.go b/typedapi/ccr/pauseautofollowpattern/response.go index 2887379cb9..9a1d738ae9 100644 --- a/typedapi/ccr/pauseautofollowpattern/response.go +++ b/typedapi/ccr/pauseautofollowpattern/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package pauseautofollowpattern // Response holds the response body struct for the package pauseautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/pause_auto_follow_pattern/PauseAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/pause_auto_follow_pattern/PauseAutoFollowPatternResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ccr/pausefollow/pause_follow.go b/typedapi/ccr/pausefollow/pause_follow.go index 3acc62d252..c3587d9bf9 100644 --- a/typedapi/ccr/pausefollow/pause_follow.go +++ b/typedapi/ccr/pausefollow/pause_follow.go @@ -16,10 +16,16 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Pauses a follower index. The follower index will not fetch any additional -// operations from the leader index. +// Pause a follower. +// +// Pause a cross-cluster replication follower index. +// The follower index will not fetch any additional operations from the leader +// index. +// You can resume following with the resume follower API. +// You can pause and resume a follower index to change the configuration of the +// following task. package pausefollow import ( @@ -77,8 +83,14 @@ func NewPauseFollowFunc(tp elastictransport.Interface) NewPauseFollow { } } -// Pauses a follower index. The follower index will not fetch any additional -// operations from the leader index. +// Pause a follower. +// +// Pause a cross-cluster replication follower index. +// The follower index will not fetch any additional operations from the leader +// index. +// You can resume following with the resume follower API. +// You can pause and resume a follower index to change the configuration of the +// following task. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-pause-follow.html func New(tp elastictransport.Interface) *PauseFollow { @@ -292,7 +304,7 @@ func (r *PauseFollow) Header(key, value string) *PauseFollow { return r } -// Index The name of the follower index that should pause following its leader index. +// Index The name of the follower index. // API Name: index func (r *PauseFollow) _index(index string) *PauseFollow { r.paramSet |= indexMask @@ -301,6 +313,17 @@ func (r *PauseFollow) _index(index string) *PauseFollow { return r } +// MasterTimeout The period to wait for a connection to the master node. 
+// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *PauseFollow) MasterTimeout(duration string) *PauseFollow { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/pausefollow/response.go b/typedapi/ccr/pausefollow/response.go index 68f56d7081..b40bb77687 100644 --- a/typedapi/ccr/pausefollow/response.go +++ b/typedapi/ccr/pausefollow/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package pausefollow // Response holds the response body struct for the package pausefollow // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/pause_follow/PauseFollowIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/pause_follow/PauseFollowIndexResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go b/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go index dd78a102ce..86582a235a 100644 --- a/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go +++ b/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go @@ -16,11 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Creates a new named collection of auto-follow patterns against a specified -// remote cluster. Newly created indices on the remote cluster matching any of -// the specified patterns will be automatically configured as follower indices. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Create or update auto-follow patterns. +// Create a collection of cross-cluster replication auto-follow patterns for a +// remote cluster. +// Newly created indices on the remote cluster that match any of the patterns +// are automatically configured as follower indices. +// Indices on the remote cluster that were created before the auto-follow +// pattern was created will not be auto-followed even if they match the pattern. +// +// This API can also be used to update auto-follow patterns. +// NOTE: Follower indices that were configured automatically before updating an +// auto-follow pattern will remain unchanged even if they do not match against +// the new patterns. package putautofollowpattern import ( @@ -83,9 +92,18 @@ func NewPutAutoFollowPatternFunc(tp elastictransport.Interface) NewPutAutoFollow } } -// Creates a new named collection of auto-follow patterns against a specified -// remote cluster. Newly created indices on the remote cluster matching any of -// the specified patterns will be automatically configured as follower indices. +// Create or update auto-follow patterns. +// Create a collection of cross-cluster replication auto-follow patterns for a +// remote cluster. +// Newly created indices on the remote cluster that match any of the patterns +// are automatically configured as follower indices. +// Indices on the remote cluster that were created before the auto-follow +// pattern was created will not be auto-followed even if they match the pattern. 
+// +// This API can also be used to update auto-follow patterns. +// NOTE: Follower indices that were configured automatically before updating an +// auto-follow pattern will remain unchanged even if they do not match against +// the new patterns. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-auto-follow-pattern.html func New(tp elastictransport.Interface) *PutAutoFollowPattern { @@ -95,8 +113,6 @@ func New(tp elastictransport.Interface) *PutAutoFollowPattern { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -319,6 +335,14 @@ func (r *PutAutoFollowPattern) _name(name string) *PutAutoFollowPattern { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PutAutoFollowPattern) MasterTimeout(duration string) *PutAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -363,144 +387,234 @@ func (r *PutAutoFollowPattern) Pretty(pretty bool) *PutAutoFollowPattern { return r } -// FollowIndexPattern The name of follower index. The template {{leader_index}} can be used to +// The name of follower index. The template {{leader_index}} can be used to // derive the name of the follower index from the name of the leader index. When // following a data stream, use {{leader_index}}; CCR does not support changes // to the names of a follower data stream’s backing indices. 
// API name: follow_index_pattern func (r *PutAutoFollowPattern) FollowIndexPattern(indexpattern string) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FollowIndexPattern = &indexpattern return r } -// LeaderIndexExclusionPatterns An array of simple index patterns that can be used to exclude indices from +// An array of simple index patterns that can be used to exclude indices from // being auto-followed. Indices in the remote cluster whose names are matching // one or more leader_index_patterns and one or more // leader_index_exclusion_patterns won’t be followed. // API name: leader_index_exclusion_patterns func (r *PutAutoFollowPattern) LeaderIndexExclusionPatterns(indexpatterns ...string) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.LeaderIndexExclusionPatterns = indexpatterns return r } -// LeaderIndexPatterns An array of simple index patterns to match against indices in the remote +// An array of simple index patterns to match against indices in the remote // cluster specified by the remote_cluster field. // API name: leader_index_patterns func (r *PutAutoFollowPattern) LeaderIndexPatterns(indexpatterns ...string) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.LeaderIndexPatterns = indexpatterns return r } -// MaxOutstandingReadRequests The maximum number of outstanding reads requests from the remote cluster. +// The maximum number of outstanding reads requests from the remote cluster. 
// API name: max_outstanding_read_requests func (r *PutAutoFollowPattern) MaxOutstandingReadRequests(maxoutstandingreadrequests int) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxOutstandingReadRequests = &maxoutstandingreadrequests return r } -// MaxOutstandingWriteRequests The maximum number of outstanding reads requests from the remote cluster. +// The maximum number of outstanding reads requests from the remote cluster. // API name: max_outstanding_write_requests func (r *PutAutoFollowPattern) MaxOutstandingWriteRequests(maxoutstandingwriterequests int) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxOutstandingWriteRequests = &maxoutstandingwriterequests return r } -// MaxReadRequestOperationCount The maximum number of operations to pull per read from the remote cluster. +// The maximum number of operations to pull per read from the remote cluster. // API name: max_read_request_operation_count func (r *PutAutoFollowPattern) MaxReadRequestOperationCount(maxreadrequestoperationcount int) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxReadRequestOperationCount = &maxreadrequestoperationcount return r } -// MaxReadRequestSize The maximum size in bytes of per read of a batch of operations pulled from +// The maximum size in bytes of per read of a batch of operations pulled from // the remote cluster. 
// API name: max_read_request_size -func (r *PutAutoFollowPattern) MaxReadRequestSize(bytesize types.ByteSize) *PutAutoFollowPattern { - r.req.MaxReadRequestSize = bytesize +func (r *PutAutoFollowPattern) MaxReadRequestSize(bytesize types.ByteSizeVariant) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxReadRequestSize = *bytesize.ByteSizeCaster() return r } -// MaxRetryDelay The maximum time to wait before retrying an operation that failed +// The maximum time to wait before retrying an operation that failed // exceptionally. An exponential backoff strategy is employed when retrying. // API name: max_retry_delay -func (r *PutAutoFollowPattern) MaxRetryDelay(duration types.Duration) *PutAutoFollowPattern { - r.req.MaxRetryDelay = duration +func (r *PutAutoFollowPattern) MaxRetryDelay(duration types.DurationVariant) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxRetryDelay = *duration.DurationCaster() return r } -// MaxWriteBufferCount The maximum number of operations that can be queued for writing. When this +// The maximum number of operations that can be queued for writing. When this // limit is reached, reads from the remote cluster will be deferred until the // number of queued operations goes below the limit. // API name: max_write_buffer_count func (r *PutAutoFollowPattern) MaxWriteBufferCount(maxwritebuffercount int) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxWriteBufferCount = &maxwritebuffercount return r } -// MaxWriteBufferSize The maximum total bytes of operations that can be queued for writing. When +// The maximum total bytes of operations that can be queued for writing. 
When // this limit is reached, reads from the remote cluster will be deferred until // the total bytes of queued operations goes below the limit. // API name: max_write_buffer_size -func (r *PutAutoFollowPattern) MaxWriteBufferSize(bytesize types.ByteSize) *PutAutoFollowPattern { - r.req.MaxWriteBufferSize = bytesize +func (r *PutAutoFollowPattern) MaxWriteBufferSize(bytesize types.ByteSizeVariant) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteBufferSize = *bytesize.ByteSizeCaster() return r } -// MaxWriteRequestOperationCount The maximum number of operations per bulk write request executed on the +// The maximum number of operations per bulk write request executed on the // follower. // API name: max_write_request_operation_count func (r *PutAutoFollowPattern) MaxWriteRequestOperationCount(maxwriterequestoperationcount int) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxWriteRequestOperationCount = &maxwriterequestoperationcount return r } -// MaxWriteRequestSize The maximum total bytes of operations per bulk write request executed on the +// The maximum total bytes of operations per bulk write request executed on the // follower. 
// API name: max_write_request_size -func (r *PutAutoFollowPattern) MaxWriteRequestSize(bytesize types.ByteSize) *PutAutoFollowPattern { - r.req.MaxWriteRequestSize = bytesize +func (r *PutAutoFollowPattern) MaxWriteRequestSize(bytesize types.ByteSizeVariant) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteRequestSize = *bytesize.ByteSizeCaster() return r } -// ReadPollTimeout The maximum time to wait for new operations on the remote cluster when the +// The maximum time to wait for new operations on the remote cluster when the // follower index is synchronized with the leader index. When the timeout has // elapsed, the poll for operations will return to the follower so that it can // update some statistics. Then the follower will immediately attempt to read // from the leader again. // API name: read_poll_timeout -func (r *PutAutoFollowPattern) ReadPollTimeout(duration types.Duration) *PutAutoFollowPattern { - r.req.ReadPollTimeout = duration +func (r *PutAutoFollowPattern) ReadPollTimeout(duration types.DurationVariant) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ReadPollTimeout = *duration.DurationCaster() return r } -// RemoteCluster The remote cluster containing the leader indices to match against. +// The remote cluster containing the leader indices to match against. // API name: remote_cluster func (r *PutAutoFollowPattern) RemoteCluster(remotecluster string) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RemoteCluster = remotecluster return r } -// Settings Settings to override from the leader index. Note that certain settings can +// Settings to override from the leader index. Note that certain settings can // not be overrode (e.g., index.number_of_shards). 
// API name: settings func (r *PutAutoFollowPattern) Settings(settings map[string]json.RawMessage) *PutAutoFollowPattern { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Settings = settings + return r +} + +func (r *PutAutoFollowPattern) AddSetting(key string, value json.RawMessage) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Settings == nil { + r.req.Settings = make(map[string]json.RawMessage) + } else { + tmp = r.req.Settings + } + + tmp[key] = value + r.req.Settings = tmp return r } diff --git a/typedapi/ccr/putautofollowpattern/request.go b/typedapi/ccr/putautofollowpattern/request.go index 49c2020431..37f9e09eef 100644 --- a/typedapi/ccr/putautofollowpattern/request.go +++ b/typedapi/ccr/putautofollowpattern/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putautofollowpattern @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternRequest.ts#L27-L112 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternRequest.ts#L27-L133 type Request struct { // FollowIndexPattern The name of follower index. 
The template {{leader_index}} can be used to diff --git a/typedapi/ccr/putautofollowpattern/response.go b/typedapi/ccr/putautofollowpattern/response.go index 7a1b58e892..302791ffa0 100644 --- a/typedapi/ccr/putautofollowpattern/response.go +++ b/typedapi/ccr/putautofollowpattern/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putautofollowpattern // Response holds the response body struct for the package putautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ccr/resumeautofollowpattern/response.go b/typedapi/ccr/resumeautofollowpattern/response.go index 866c7df925..dc796aa57f 100644 --- a/typedapi/ccr/resumeautofollowpattern/response.go +++ b/typedapi/ccr/resumeautofollowpattern/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package resumeautofollowpattern // Response holds the response body struct for the package resumeautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/resume_auto_follow_pattern/ResumeAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/resume_auto_follow_pattern/ResumeAutoFollowPatternResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go b/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go index 66ad35c709..fc911b86e2 100644 --- a/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go +++ b/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Resumes an auto-follow pattern that has been paused +// Resume an auto-follow pattern. +// +// Resume a cross-cluster replication auto-follow pattern that was paused. +// The auto-follow pattern will resume configuring following indices for newly +// created indices that match its patterns on the remote cluster. +// Remote indices created while the pattern was paused will also be followed +// unless they have been deleted or closed in the interim. 
package resumeautofollowpattern import ( @@ -76,7 +82,13 @@ func NewResumeAutoFollowPatternFunc(tp elastictransport.Interface) NewResumeAuto } } -// Resumes an auto-follow pattern that has been paused +// Resume an auto-follow pattern. +// +// Resume a cross-cluster replication auto-follow pattern that was paused. +// The auto-follow pattern will resume configuring following indices for newly +// created indices that match its patterns on the remote cluster. +// Remote indices created while the pattern was paused will also be followed +// unless they have been deleted or closed in the interim. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-resume-auto-follow-pattern.html func New(tp elastictransport.Interface) *ResumeAutoFollowPattern { @@ -292,8 +304,7 @@ func (r *ResumeAutoFollowPattern) Header(key, value string) *ResumeAutoFollowPat return r } -// Name The name of the auto follow pattern to resume discovering new indices to -// follow. +// Name The name of the auto-follow pattern to resume. // API Name: name func (r *ResumeAutoFollowPattern) _name(name string) *ResumeAutoFollowPattern { r.paramSet |= nameMask @@ -302,6 +313,17 @@ func (r *ResumeAutoFollowPattern) _name(name string) *ResumeAutoFollowPattern { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *ResumeAutoFollowPattern) MasterTimeout(duration string) *ResumeAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace diff --git a/typedapi/ccr/resumefollow/request.go b/typedapi/ccr/resumefollow/request.go index a4826d0627..9fd0227d6d 100644 --- a/typedapi/ccr/resumefollow/request.go +++ b/typedapi/ccr/resumefollow/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package resumefollow @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package resumefollow // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/resume_follow/ResumeFollowIndexRequest.ts#L25-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/resume_follow/ResumeFollowIndexRequest.ts#L25-L65 type Request struct { MaxOutstandingReadRequests *int64 `json:"max_outstanding_read_requests,omitempty"` MaxOutstandingWriteRequests *int64 `json:"max_outstanding_write_requests,omitempty"` diff --git a/typedapi/ccr/resumefollow/response.go b/typedapi/ccr/resumefollow/response.go index 7512c2946d..6852dac056 100644 --- a/typedapi/ccr/resumefollow/response.go +++ b/typedapi/ccr/resumefollow/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package resumefollow // Response holds the response body struct for the package resumefollow // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/resume_follow/ResumeFollowIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/resume_follow/ResumeFollowIndexResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ccr/resumefollow/resume_follow.go b/typedapi/ccr/resumefollow/resume_follow.go index 62644ea50d..69832a6f7b 100644 --- a/typedapi/ccr/resumefollow/resume_follow.go +++ b/typedapi/ccr/resumefollow/resume_follow.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Resumes a follower index that has been paused +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Resume a follower. +// Resume a cross-cluster replication follower index that was paused. +// The follower index could have been paused with the pause follower API. +// Alternatively it could be paused due to replication that cannot be retried +// due to failures during following tasks. +// When this API returns, the follower index will resume fetching operations +// from the leader index. package resumefollow import ( @@ -81,7 +87,13 @@ func NewResumeFollowFunc(tp elastictransport.Interface) NewResumeFollow { } } -// Resumes a follower index that has been paused +// Resume a follower. 
+// Resume a cross-cluster replication follower index that was paused. +// The follower index could have been paused with the pause follower API. +// Alternatively it could be paused due to replication that cannot be retried +// due to failures during following tasks. +// When this API returns, the follower index will resume fetching operations +// from the leader index. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html func New(tp elastictransport.Interface) *ResumeFollow { @@ -91,8 +103,6 @@ func New(tp elastictransport.Interface) *ResumeFollow { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -315,6 +325,14 @@ func (r *ResumeFollow) _index(index string) *ResumeFollow { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *ResumeFollow) MasterTimeout(duration string) *ResumeFollow { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace @@ -361,6 +379,10 @@ func (r *ResumeFollow) Pretty(pretty bool) *ResumeFollow { // API name: max_outstanding_read_requests func (r *ResumeFollow) MaxOutstandingReadRequests(maxoutstandingreadrequests int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxOutstandingReadRequests = &maxoutstandingreadrequests @@ -369,6 +391,10 @@ func (r *ResumeFollow) MaxOutstandingReadRequests(maxoutstandingreadrequests int // API name: max_outstanding_write_requests func (r *ResumeFollow) MaxOutstandingWriteRequests(maxoutstandingwriterequests int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxOutstandingWriteRequests = &maxoutstandingwriterequests @@ -377,6 +403,10 @@ func (r *ResumeFollow) MaxOutstandingWriteRequests(maxoutstandingwriterequests i // API name: max_read_request_operation_count func (r *ResumeFollow) MaxReadRequestOperationCount(maxreadrequestoperationcount int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxReadRequestOperationCount = &maxreadrequestoperationcount @@ -385,6 +415,10 @@ func (r *ResumeFollow) MaxReadRequestOperationCount(maxreadrequestoperationcount // API name: max_read_request_size func (r *ResumeFollow) MaxReadRequestSize(maxreadrequestsize string) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxReadRequestSize = &maxreadrequestsize @@ -392,14 +426,23 @@ func (r *ResumeFollow) MaxReadRequestSize(maxreadrequestsize string) *ResumeFoll } // API name: max_retry_delay -func (r *ResumeFollow) MaxRetryDelay(duration types.Duration) *ResumeFollow { - r.req.MaxRetryDelay = duration +func (r *ResumeFollow) MaxRetryDelay(duration types.DurationVariant) *ResumeFollow { + // Initialize the 
request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxRetryDelay = *duration.DurationCaster() return r } // API name: max_write_buffer_count func (r *ResumeFollow) MaxWriteBufferCount(maxwritebuffercount int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxWriteBufferCount = &maxwritebuffercount @@ -408,6 +451,10 @@ func (r *ResumeFollow) MaxWriteBufferCount(maxwritebuffercount int64) *ResumeFol // API name: max_write_buffer_size func (r *ResumeFollow) MaxWriteBufferSize(maxwritebuffersize string) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxWriteBufferSize = &maxwritebuffersize @@ -416,6 +463,10 @@ func (r *ResumeFollow) MaxWriteBufferSize(maxwritebuffersize string) *ResumeFoll // API name: max_write_request_operation_count func (r *ResumeFollow) MaxWriteRequestOperationCount(maxwriterequestoperationcount int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxWriteRequestOperationCount = &maxwriterequestoperationcount @@ -424,6 +475,10 @@ func (r *ResumeFollow) MaxWriteRequestOperationCount(maxwriterequestoperationcou // API name: max_write_request_size func (r *ResumeFollow) MaxWriteRequestSize(maxwriterequestsize string) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxWriteRequestSize = &maxwriterequestsize @@ -431,8 +486,13 @@ func (r *ResumeFollow) MaxWriteRequestSize(maxwriterequestsize string) *ResumeFo } // API name: read_poll_timeout -func (r *ResumeFollow) ReadPollTimeout(duration types.Duration) *ResumeFollow { - r.req.ReadPollTimeout = duration +func (r *ResumeFollow) ReadPollTimeout(duration types.DurationVariant) *ResumeFollow { + // Initialize the request if it is 
not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ReadPollTimeout = *duration.DurationCaster() return r } diff --git a/typedapi/ccr/stats/response.go b/typedapi/ccr/stats/response.go index 52d62c2719..e642cb5b80 100644 --- a/typedapi/ccr/stats/response.go +++ b/typedapi/ccr/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stats @@ -26,10 +26,13 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/stats/CcrStatsResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/stats/CcrStatsResponse.ts#L22-L29 type Response struct { + + // AutoFollowStats Statistics for the auto-follow coordinator. AutoFollowStats types.AutoFollowStats `json:"auto_follow_stats"` - FollowStats types.FollowStats `json:"follow_stats"` + // FollowStats Shard-level statistics for follower indices. + FollowStats types.FollowStats `json:"follow_stats"` } // NewResponse returns a Response diff --git a/typedapi/ccr/stats/stats.go b/typedapi/ccr/stats/stats.go index 7b4bba3e38..8298da1bf3 100644 --- a/typedapi/ccr/stats/stats.go +++ b/typedapi/ccr/stats/stats.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Gets all stats related to cross-cluster replication. 
+// Get cross-cluster replication stats. +// +// This API returns stats about auto-following and the same shard-level stats as +// the get follower stats API. package stats import ( @@ -68,7 +71,10 @@ func NewStatsFunc(tp elastictransport.Interface) NewStats { } } -// Gets all stats related to cross-cluster replication. +// Get cross-cluster replication stats. +// +// This API returns stats about auto-following and the same shard-level stats as +// the get follower stats API. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-stats.html func New(tp elastictransport.Interface) *Stats { @@ -276,6 +282,26 @@ func (r *Stats) Header(key, value string) *Stats { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *Stats) MasterTimeout(duration string) *Stats { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. +// API name: timeout +func (r *Stats) Timeout(duration string) *Stats { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ccr/unfollow/response.go b/typedapi/ccr/unfollow/response.go index be1cf34272..992d8765c5 100644 --- a/typedapi/ccr/unfollow/response.go +++ b/typedapi/ccr/unfollow/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package unfollow // Response holds the response body struct for the package unfollow // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/unfollow/UnfollowIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/unfollow/UnfollowIndexResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ccr/unfollow/unfollow.go b/typedapi/ccr/unfollow/unfollow.go index 5992ab72f5..7e0e779ef8 100644 --- a/typedapi/ccr/unfollow/unfollow.go +++ b/typedapi/ccr/unfollow/unfollow.go @@ -16,10 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Stops the following task associated with a follower index and removes index -// metadata and settings associated with cross-cluster replication. +// Unfollow an index. +// +// Convert a cross-cluster replication follower index to a regular index. +// The API stops the following task associated with a follower index and removes +// index metadata and settings associated with cross-cluster replication. +// The follower index must be paused and closed before you call the unfollow +// API. +// +// > info +// > Currently cross-cluster replication does not support converting an existing +// regular index to a follower index. Converting a follower index to a regular +// index is an irreversible operation. 
package unfollow import ( @@ -77,8 +87,18 @@ func NewUnfollowFunc(tp elastictransport.Interface) NewUnfollow { } } -// Stops the following task associated with a follower index and removes index -// metadata and settings associated with cross-cluster replication. +// Unfollow an index. +// +// Convert a cross-cluster replication follower index to a regular index. +// The API stops the following task associated with a follower index and removes +// index metadata and settings associated with cross-cluster replication. +// The follower index must be paused and closed before you call the unfollow +// API. +// +// > info +// > Currently cross-cluster replication does not support converting an existing +// regular index to a follower index. Converting a follower index to a regular +// index is an irreversible operation. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-unfollow.html func New(tp elastictransport.Interface) *Unfollow { @@ -292,7 +312,7 @@ func (r *Unfollow) Header(key, value string) *Unfollow { return r } -// Index The name of the follower index that should be turned into a regular index. +// Index The name of the follower index. // API Name: index func (r *Unfollow) _index(index string) *Unfollow { r.paramSet |= indexMask @@ -301,6 +321,17 @@ func (r *Unfollow) _index(index string) *Unfollow { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *Unfollow) MasterTimeout(duration string) *Unfollow { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace diff --git a/typedapi/cluster/allocationexplain/allocation_explain.go b/typedapi/cluster/allocationexplain/allocation_explain.go index 009f67067e..9d168f8c08 100644 --- a/typedapi/cluster/allocationexplain/allocation_explain.go +++ b/typedapi/cluster/allocationexplain/allocation_explain.go @@ -16,9 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Provides explanations for shard allocations in the cluster. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Explain the shard allocations. +// Get explanations for shard allocations in the cluster. +// For unassigned shards, it provides an explanation for why the shard is +// unassigned. +// For assigned shards, it provides an explanation for why the shard is +// remaining on its current node and has not moved or rebalanced to another +// node. +// This API can be very useful when attempting to diagnose why a shard is +// unassigned or why a shard continues to remain on its current node when you +// might expect otherwise. package allocationexplain import ( @@ -73,7 +82,16 @@ func NewAllocationExplainFunc(tp elastictransport.Interface) NewAllocationExplai } } -// Provides explanations for shard allocations in the cluster. +// Explain the shard allocations. +// Get explanations for shard allocations in the cluster. +// For unassigned shards, it provides an explanation for why the shard is +// unassigned. +// For assigned shards, it provides an explanation for why the shard is +// remaining on its current node and has not moved or rebalanced to another +// node. +// This API can be very useful when attempting to diagnose why a shard is +// unassigned or why a shard continues to remain on its current node when you +// might expect otherwise. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-allocation-explain.html func New(tp elastictransport.Interface) *AllocationExplain { @@ -83,8 +101,6 @@ func New(tp elastictransport.Interface) *AllocationExplain { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -310,6 +326,14 @@ func (r *AllocationExplain) IncludeYesDecisions(includeyesdecisions bool) *Alloc return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *AllocationExplain) MasterTimeout(duration string) *AllocationExplain { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -354,35 +378,54 @@ func (r *AllocationExplain) Pretty(pretty bool) *AllocationExplain { return r } -// CurrentNode Specifies the node ID or the name of the node to only explain a shard that is +// Specifies the node ID or the name of the node to only explain a shard that is // currently located on the specified node. // API name: current_node func (r *AllocationExplain) CurrentNode(currentnode string) *AllocationExplain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.CurrentNode = ¤tnode return r } -// Index Specifies the name of the index that you would like an explanation for. +// Specifies the name of the index that you would like an explanation for. // API name: index func (r *AllocationExplain) Index(indexname string) *AllocationExplain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Index = &indexname return r } -// Primary If true, returns explanation for the primary shard for the given shard ID. 
+// If true, returns explanation for the primary shard for the given shard ID. // API name: primary func (r *AllocationExplain) Primary(primary bool) *AllocationExplain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Primary = &primary return r } -// Shard Specifies the ID of the shard that you would like an explanation for. +// Specifies the ID of the shard that you would like an explanation for. // API name: shard func (r *AllocationExplain) Shard(shard int) *AllocationExplain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Shard = &shard return r diff --git a/typedapi/cluster/allocationexplain/request.go b/typedapi/cluster/allocationexplain/request.go index 2288681e37..3dbbf4f174 100644 --- a/typedapi/cluster/allocationexplain/request.go +++ b/typedapi/cluster/allocationexplain/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package allocationexplain @@ -31,7 +31,7 @@ import ( // Request holds the request body struct for the package allocationexplain // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/ClusterAllocationExplainRequest.ts#L24-L61 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/ClusterAllocationExplainRequest.ts#L25-L78 type Request struct { // CurrentNode Specifies the node ID or the name of the node to only explain a shard that is diff --git a/typedapi/cluster/allocationexplain/response.go b/typedapi/cluster/allocationexplain/response.go index 0e35aa0f98..01e4d64058 100644 --- a/typedapi/cluster/allocationexplain/response.go +++ b/typedapi/cluster/allocationexplain/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package allocationexplain @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package allocationexplain // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/ClusterAllocationExplainResponse.ts#L32-L64 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/ClusterAllocationExplainResponse.ts#L32-L64 type Response struct { AllocateExplanation *string `json:"allocate_explanation,omitempty"` AllocationDelay types.Duration `json:"allocation_delay,omitempty"` diff --git a/typedapi/cluster/deletecomponenttemplate/delete_component_template.go b/typedapi/cluster/deletecomponenttemplate/delete_component_template.go index db6a95466c..849e6022d2 100644 --- a/typedapi/cluster/deletecomponenttemplate/delete_component_template.go +++ b/typedapi/cluster/deletecomponenttemplate/delete_component_template.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete component templates. -// Deletes component templates. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. package deletecomponenttemplate @@ -80,7 +79,6 @@ func NewDeleteComponentTemplateFunc(tp elastictransport.Interface) NewDeleteComp } // Delete component templates. -// Deletes component templates. 
// Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // diff --git a/typedapi/cluster/deletecomponenttemplate/response.go b/typedapi/cluster/deletecomponenttemplate/response.go index a961c7dcab..15b87fe0bd 100644 --- a/typedapi/cluster/deletecomponenttemplate/response.go +++ b/typedapi/cluster/deletecomponenttemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletecomponenttemplate // Response holds the response body struct for the package deletecomponenttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/delete_component_template/ClusterDeleteComponentTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/delete_component_template/ClusterDeleteComponentTemplateResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go b/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go index 088b777919..3a016cd30e 100644 --- a/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go +++ b/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Clears cluster voting config exclusions. +// Clear cluster voting config exclusions. +// Remove master-eligible nodes from the voting configuration exclusion list. package deletevotingconfigexclusions import ( @@ -66,7 +67,8 @@ func NewDeleteVotingConfigExclusionsFunc(tp elastictransport.Interface) NewDelet } } -// Clears cluster voting config exclusions. +// Clear cluster voting config exclusions. +// Remove master-eligible nodes from the voting configuration exclusion list. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html func New(tp elastictransport.Interface) *DeleteVotingConfigExclusions { @@ -225,6 +227,14 @@ func (r *DeleteVotingConfigExclusions) Header(key, value string) *DeleteVotingCo return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *DeleteVotingConfigExclusions) MasterTimeout(duration string) *DeleteVotingConfigExclusions { + r.values.Set("master_timeout", duration) + + return r +} + // WaitForRemoval Specifies whether to wait for all excluded nodes to be removed from the // cluster before clearing the voting configuration exclusions list. // Defaults to true, meaning that all excluded nodes must be removed from diff --git a/typedapi/cluster/existscomponenttemplate/exists_component_template.go b/typedapi/cluster/existscomponenttemplate/exists_component_template.go index fc0940c052..3c33eda03c 100644 --- a/typedapi/cluster/existscomponenttemplate/exists_component_template.go +++ b/typedapi/cluster/existscomponenttemplate/exists_component_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Check component templates. // Returns information about whether a particular component template exists. diff --git a/typedapi/cluster/getcomponenttemplate/get_component_template.go b/typedapi/cluster/getcomponenttemplate/get_component_template.go index 885e4427ab..ace6d59a37 100644 --- a/typedapi/cluster/getcomponenttemplate/get_component_template.go +++ b/typedapi/cluster/getcomponenttemplate/get_component_template.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get component templates. -// Retrieves information about component templates. +// Get information about component templates. package getcomponenttemplate import ( @@ -76,7 +76,7 @@ func NewGetComponentTemplateFunc(tp elastictransport.Interface) NewGetComponentT } // Get component templates. -// Retrieves information about component templates. +// Get information about component templates. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html func New(tp elastictransport.Interface) *GetComponentTemplate { diff --git a/typedapi/cluster/getcomponenttemplate/response.go b/typedapi/cluster/getcomponenttemplate/response.go index 5b68a97818..f749ec5ae3 100644 --- a/typedapi/cluster/getcomponenttemplate/response.go +++ b/typedapi/cluster/getcomponenttemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getcomponenttemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getcomponenttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/get_component_template/ClusterGetComponentTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/get_component_template/ClusterGetComponentTemplateResponse.ts#L22-L24 type Response struct { ComponentTemplates []types.ClusterComponentTemplate `json:"component_templates"` } diff --git a/typedapi/cluster/getsettings/get_settings.go b/typedapi/cluster/getsettings/get_settings.go index a805b56e77..cbb36cb498 100644 --- a/typedapi/cluster/getsettings/get_settings.go +++ b/typedapi/cluster/getsettings/get_settings.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns cluster-wide settings. +// Get cluster-wide settings. // By default, it returns only settings that have been explicitly defined. package getsettings @@ -69,7 +69,7 @@ func NewGetSettingsFunc(tp elastictransport.Interface) NewGetSettings { } } -// Returns cluster-wide settings. +// Get cluster-wide settings. // By default, it returns only settings that have been explicitly defined. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-get-settings.html diff --git a/typedapi/cluster/getsettings/response.go b/typedapi/cluster/getsettings/response.go index c4224ec56f..5da4f50aa8 100644 --- a/typedapi/cluster/getsettings/response.go +++ b/typedapi/cluster/getsettings/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getsettings @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getsettings // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/get_settings/ClusterGetSettingsResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/get_settings/ClusterGetSettingsResponse.ts#L23-L29 type Response struct { Defaults map[string]json.RawMessage `json:"defaults,omitempty"` Persistent map[string]json.RawMessage `json:"persistent"` diff --git a/typedapi/cluster/health/health.go b/typedapi/cluster/health/health.go index 0c38f0c158..6c264b76a8 100644 --- a/typedapi/cluster/health/health.go +++ b/typedapi/cluster/health/health.go @@ -16,18 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// The cluster health API returns a simple status on the health of the cluster. +// Get the cluster health status. // You can also use the API to get the health status of only specified data -// streams and indices. 
For data streams, the API retrieves the health status of -// the stream’s backing indices. -// The cluster health status is: green, yellow or red. On the shard level, a red -// status indicates that the specific shard is not allocated in the cluster, -// yellow means that the primary shard is allocated but replicas are not, and -// green means that all shards are allocated. The index level status is -// controlled by the worst shard status. The cluster status is controlled by the -// worst index status. +// streams and indices. +// For data streams, the API retrieves the health status of the stream’s backing +// indices. +// +// The cluster health status is: green, yellow or red. +// On the shard level, a red status indicates that the specific shard is not +// allocated in the cluster. Yellow means that the primary shard is allocated +// but replicas are not. Green means that all shards are allocated. +// The index level status is controlled by the worst shard status. +// +// One of the main benefits of the API is the ability to wait until the cluster +// reaches a certain high watermark health level. +// The cluster status is controlled by the worst index status. package health import ( @@ -88,16 +93,21 @@ func NewHealthFunc(tp elastictransport.Interface) NewHealth { } } -// The cluster health API returns a simple status on the health of the cluster. +// Get the cluster health status. // You can also use the API to get the health status of only specified data -// streams and indices. For data streams, the API retrieves the health status of -// the stream’s backing indices. -// The cluster health status is: green, yellow or red. On the shard level, a red -// status indicates that the specific shard is not allocated in the cluster, -// yellow means that the primary shard is allocated but replicas are not, and -// green means that all shards are allocated. The index level status is -// controlled by the worst shard status. 
The cluster status is controlled by the -// worst index status. +// streams and indices. +// For data streams, the API retrieves the health status of the stream’s backing +// indices. +// +// The cluster health status is: green, yellow or red. +// On the shard level, a red status indicates that the specific shard is not +// allocated in the cluster. Yellow means that the primary shard is allocated +// but replicas are not. Green means that all shards are allocated. +// The index level status is controlled by the worst shard status. +// +// One of the main benefits of the API is the ability to wait until the cluster +// reaches a certain high watermark health level. +// The cluster status is controlled by the worst index status. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html func New(tp elastictransport.Interface) *Health { diff --git a/typedapi/cluster/health/response.go b/typedapi/cluster/health/response.go index 4dd5ca18ee..db4b85fc32 100644 --- a/typedapi/cluster/health/response.go +++ b/typedapi/cluster/health/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package health @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package health // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/health/ClusterHealthResponse.ts#L26-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/health/ClusterHealthResponse.ts#L26-L37 type Response struct { // ActivePrimaryShards The number of active primary shards. 
@@ -63,6 +63,8 @@ type Response struct { // TimedOut If false the response returned within the period of time that is specified by // the timeout parameter (30s by default) TimedOut bool `json:"timed_out"` + // UnassignedPrimaryShards The number of primary shards that are not allocated. + UnassignedPrimaryShards int `json:"unassigned_primary_shards"` // UnassignedShards The number of shards that are not allocated. UnassignedShards int `json:"unassigned_shards"` } diff --git a/typedapi/cluster/info/info.go b/typedapi/cluster/info/info.go index 4582d2f04d..4adc2ca8ae 100644 --- a/typedapi/cluster/info/info.go +++ b/typedapi/cluster/info/info.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get cluster info. // Returns basic information about the cluster. diff --git a/typedapi/cluster/info/response.go b/typedapi/cluster/info/response.go index 2b29a75125..0b81e0dde3 100644 --- a/typedapi/cluster/info/response.go +++ b/typedapi/cluster/info/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package info @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/info/ClusterInfoResponse.ts#L26-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/info/ClusterInfoResponse.ts#L26-L34 type Response struct { ClusterName string `json:"cluster_name"` Http *types.Http `json:"http,omitempty"` diff --git a/typedapi/cluster/pendingtasks/pending_tasks.go b/typedapi/cluster/pendingtasks/pending_tasks.go index ec2e5b2251..97f6dcc233 100644 --- a/typedapi/cluster/pendingtasks/pending_tasks.go +++ b/typedapi/cluster/pendingtasks/pending_tasks.go @@ -16,12 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns cluster-level changes (such as create index, update mapping, allocate -// or fail shard) that have not yet been executed. +// Get the pending cluster tasks. +// Get information about cluster-level changes (such as create index, update +// mapping, allocate or fail shard) that have not yet taken effect. +// // NOTE: This API returns a list of any pending updates to the cluster state. 
-// These are distinct from the tasks reported by the Task Management API which +// These are distinct from the tasks reported by the task management API which // include periodic tasks and tasks initiated by the user, such as node stats, // search queries, or create index requests. // However, if a user-initiated task such as a create index command causes a @@ -76,10 +78,12 @@ func NewPendingTasksFunc(tp elastictransport.Interface) NewPendingTasks { } } -// Returns cluster-level changes (such as create index, update mapping, allocate -// or fail shard) that have not yet been executed. +// Get the pending cluster tasks. +// Get information about cluster-level changes (such as create index, update +// mapping, allocate or fail shard) that have not yet taken effect. +// // NOTE: This API returns a list of any pending updates to the cluster state. -// These are distinct from the tasks reported by the Task Management API which +// These are distinct from the tasks reported by the task management API which // include periodic tasks and tasks initiated by the user, such as node stats, // search queries, or create index requests. // However, if a user-initiated task such as a create index command causes a diff --git a/typedapi/cluster/pendingtasks/response.go b/typedapi/cluster/pendingtasks/response.go index 947e531ac8..15101bf366 100644 --- a/typedapi/cluster/pendingtasks/response.go +++ b/typedapi/cluster/pendingtasks/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package pendingtasks @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package pendingtasks // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/pending_tasks/ClusterPendingTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/pending_tasks/ClusterPendingTasksResponse.ts#L22-L24 type Response struct { Tasks []types.PendingTask `json:"tasks"` } diff --git a/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go b/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go index 5da0908aa1..e5cbc00365 100644 --- a/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go +++ b/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go @@ -16,9 +16,47 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Updates the cluster voting config exclusions by node ids or node names. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Update voting configuration exclusions. +// Update the cluster voting config exclusions by node IDs or node names. +// By default, if there are more than three master-eligible nodes in the cluster +// and you remove fewer than half of the master-eligible nodes in the cluster at +// once, the voting configuration automatically shrinks. 
+// If you want to shrink the voting configuration to contain fewer than three +// nodes or to remove half or more of the master-eligible nodes in the cluster +// at once, use this API to remove departing nodes from the voting configuration +// manually. +// The API adds an entry for each specified node to the cluster’s voting +// configuration exclusions list. +// It then waits until the cluster has reconfigured its voting configuration to +// exclude the specified nodes. +// +// Clusters should have no voting configuration exclusions in normal operation. +// Once the excluded nodes have stopped, clear the voting configuration +// exclusions with `DELETE /_cluster/voting_config_exclusions`. +// This API waits for the nodes to be fully removed from the cluster before it +// returns. +// If your cluster has voting configuration exclusions for nodes that you no +// longer intend to remove, use `DELETE +// /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the +// voting configuration exclusions without waiting for the nodes to leave the +// cluster. +// +// A response to `POST /_cluster/voting_config_exclusions` with an HTTP status +// code of 200 OK guarantees that the node has been removed from the voting +// configuration and will not be reinstated until the voting configuration +// exclusions are cleared by calling `DELETE +// /_cluster/voting_config_exclusions`. +// If the call to `POST /_cluster/voting_config_exclusions` fails or returns a +// response with an HTTP status code other than 200 OK then the node may not +// have been removed from the voting configuration. +// In that case, you may safely retry the call. +// +// NOTE: Voting exclusions are required only when you remove at least half of +// the master-eligible nodes from a cluster in a short time period. +// They are not required when removing master-ineligible nodes or when removing +// fewer than half of the master-eligible nodes. 
package postvotingconfigexclusions import ( @@ -66,7 +104,45 @@ func NewPostVotingConfigExclusionsFunc(tp elastictransport.Interface) NewPostVot } } -// Updates the cluster voting config exclusions by node ids or node names. +// Update voting configuration exclusions. +// Update the cluster voting config exclusions by node IDs or node names. +// By default, if there are more than three master-eligible nodes in the cluster +// and you remove fewer than half of the master-eligible nodes in the cluster at +// once, the voting configuration automatically shrinks. +// If you want to shrink the voting configuration to contain fewer than three +// nodes or to remove half or more of the master-eligible nodes in the cluster +// at once, use this API to remove departing nodes from the voting configuration +// manually. +// The API adds an entry for each specified node to the cluster’s voting +// configuration exclusions list. +// It then waits until the cluster has reconfigured its voting configuration to +// exclude the specified nodes. +// +// Clusters should have no voting configuration exclusions in normal operation. +// Once the excluded nodes have stopped, clear the voting configuration +// exclusions with `DELETE /_cluster/voting_config_exclusions`. +// This API waits for the nodes to be fully removed from the cluster before it +// returns. +// If your cluster has voting configuration exclusions for nodes that you no +// longer intend to remove, use `DELETE +// /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the +// voting configuration exclusions without waiting for the nodes to leave the +// cluster. +// +// A response to `POST /_cluster/voting_config_exclusions` with an HTTP status +// code of 200 OK guarantees that the node has been removed from the voting +// configuration and will not be reinstated until the voting configuration +// exclusions are cleared by calling `DELETE +// /_cluster/voting_config_exclusions`. 
+// If the call to `POST /_cluster/voting_config_exclusions` fails or returns a +// response with an HTTP status code other than 200 OK then the node may not +// have been removed from the voting configuration. +// In that case, you may safely retry the call. +// +// NOTE: Voting exclusions are required only when you remove at least half of +// the master-eligible nodes from a cluster in a short time period. +// They are not required when removing master-ineligible nodes or when removing +// fewer than half of the master-eligible nodes. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html func New(tp elastictransport.Interface) *PostVotingConfigExclusions { @@ -244,6 +320,14 @@ func (r *PostVotingConfigExclusions) NodeIds(ids ...string) *PostVotingConfigExc return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PostVotingConfigExclusions) MasterTimeout(duration string) *PostVotingConfigExclusions { + r.values.Set("master_timeout", duration) + + return r +} + // Timeout When adding a voting configuration exclusion, the API waits for the // specified nodes to be excluded from the voting configuration before // returning. If the timeout expires before the appropriate condition diff --git a/typedapi/cluster/putcomponenttemplate/put_component_template.go b/typedapi/cluster/putcomponenttemplate/put_component_template.go index 77a739e5c8..30b91a3ddb 100644 --- a/typedapi/cluster/putcomponenttemplate/put_component_template.go +++ b/typedapi/cluster/putcomponenttemplate/put_component_template.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Create or update a component template. 
-// Creates or updates a component template. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // @@ -42,6 +41,12 @@ // You can use C-style `/* *\/` block comments in component templates. // You can include comments anywhere in the request body except before the // opening curly bracket. +// +// **Applying component templates** +// +// You cannot directly apply a component template to a data stream or index. +// To be applied, a component template must be included in an index template's +// `composed_of` list. package putcomponenttemplate import ( @@ -105,7 +110,6 @@ func NewPutComponentTemplateFunc(tp elastictransport.Interface) NewPutComponentT } // Create or update a component template. -// Creates or updates a component template. // Component templates are building blocks for constructing index templates that // specify index mappings, settings, and aliases. // @@ -129,6 +133,12 @@ func NewPutComponentTemplateFunc(tp elastictransport.Interface) NewPutComponentT // You can include comments anywhere in the request body except before the // opening curly bracket. // +// **Applying component templates** +// +// You cannot directly apply a component template to a data stream or index. +// To be applied, a component template must be included in an index template's +// `composed_of` list. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html func New(tp elastictransport.Interface) *PutComponentTemplate { r := &PutComponentTemplate{ @@ -137,8 +147,6 @@ func New(tp elastictransport.Interface) *PutComponentTemplate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -352,7 +360,7 @@ func (r *PutComponentTemplate) Header(key, value string) *PutComponentTemplate { // Name Name of the component template to create. 
// Elasticsearch includes the following built-in component templates: -// `logs-mappings`; 'logs-settings`; `metrics-mappings`; +// `logs-mappings`; `logs-settings`; `metrics-mappings`; // `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. // Elastic Agent uses these templates to configure backing indices for its data // streams. @@ -432,45 +440,64 @@ func (r *PutComponentTemplate) Pretty(pretty bool) *PutComponentTemplate { return r } -// Deprecated Marks this index template as deprecated. When creating or updating a +// Marks this index template as deprecated. When creating or updating a // non-deprecated index template // that uses deprecated components, Elasticsearch will emit a deprecation // warning. // API name: deprecated func (r *PutComponentTemplate) Deprecated(deprecated bool) *PutComponentTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Deprecated = &deprecated return r } -// Meta_ Optional user metadata about the component template. -// May have any contents. This map is not automatically generated by +// Optional user metadata about the component template. +// It may have any contents. This map is not automatically generated by // Elasticsearch. // This information is stored in the cluster state, so keeping it short is // preferable. // To unset `_meta`, replace the template without specifying this information. 
// API name: _meta -func (r *PutComponentTemplate) Meta_(metadata types.Metadata) *PutComponentTemplate { - r.req.Meta_ = metadata +func (r *PutComponentTemplate) Meta_(metadata types.MetadataVariant) *PutComponentTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// Template The template to be applied which includes mappings, settings, or aliases +// The template to be applied which includes mappings, settings, or aliases // configuration. // API name: template -func (r *PutComponentTemplate) Template(template *types.IndexState) *PutComponentTemplate { +func (r *PutComponentTemplate) Template(template types.IndexStateVariant) *PutComponentTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Template = *template + r.req.Template = *template.IndexStateCaster() return r } -// Version Version number used to manage component templates externally. +// Version number used to manage component templates externally. // This number isn't automatically generated or incremented by Elasticsearch. // To unset a version, replace the template without specifying a version. // API name: version func (r *PutComponentTemplate) Version(versionnumber int64) *PutComponentTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &versionnumber return r diff --git a/typedapi/cluster/putcomponenttemplate/request.go b/typedapi/cluster/putcomponenttemplate/request.go index cb466532a4..e34d0fcaf2 100644 --- a/typedapi/cluster/putcomponenttemplate/request.go +++ b/typedapi/cluster/putcomponenttemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putcomponenttemplate @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putcomponenttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/put_component_template/ClusterPutComponentTemplateRequest.ts#L25-L94 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/put_component_template/ClusterPutComponentTemplateRequest.ts#L25-L105 type Request struct { // Deprecated Marks this index template as deprecated. When creating or updating a @@ -42,7 +42,7 @@ type Request struct { // warning. Deprecated *bool `json:"deprecated,omitempty"` // Meta_ Optional user metadata about the component template. - // May have any contents. This map is not automatically generated by + // It may have any contents. This map is not automatically generated by // Elasticsearch. // This information is stored in the cluster state, so keeping it short is // preferable. diff --git a/typedapi/cluster/putcomponenttemplate/response.go b/typedapi/cluster/putcomponenttemplate/response.go index fa896d7b0b..040c25da18 100644 --- a/typedapi/cluster/putcomponenttemplate/response.go +++ b/typedapi/cluster/putcomponenttemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putcomponenttemplate // Response holds the response body struct for the package putcomponenttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/put_component_template/ClusterPutComponentTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/put_component_template/ClusterPutComponentTemplateResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/cluster/putsettings/put_settings.go b/typedapi/cluster/putsettings/put_settings.go index 9990c33eff..5a7927b3f8 100644 --- a/typedapi/cluster/putsettings/put_settings.go +++ b/typedapi/cluster/putsettings/put_settings.go @@ -16,9 +16,40 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the cluster settings. +// Update the cluster settings. +// Configure and update dynamic settings on a running cluster. +// You can also configure dynamic settings locally on an unstarted or shut down +// node in `elasticsearch.yml`. +// +// Updates made with this API can be persistent, which apply across cluster +// restarts, or transient, which reset after a cluster restart. +// You can also reset transient or persistent settings by assigning them a null +// value. 
+// +// If you configure the same setting using multiple methods, Elasticsearch +// applies the settings in following order of precedence: 1) Transient setting; +// 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting +// value. +// For example, you can apply a transient setting to override a persistent +// setting or `elasticsearch.yml` setting. +// However, a change to an `elasticsearch.yml` setting will not override a +// defined transient or persistent setting. +// +// TIP: In Elastic Cloud, use the user settings feature to configure all cluster +// settings. This method automatically rejects unsafe settings that could break +// your cluster. +// If you run Elasticsearch on your own hardware, use this API to configure +// dynamic cluster settings. +// Only use `elasticsearch.yml` for static cluster settings and node settings. +// The API doesn’t require a restart and ensures a setting’s value is the same +// on all nodes. +// +// WARNING: Transient cluster settings are no longer recommended. Use persistent +// cluster settings instead. +// If a cluster becomes unstable, transient settings can clear unexpectedly, +// resulting in a potentially undesired cluster configuration. package putsettings import ( @@ -73,7 +104,38 @@ func NewPutSettingsFunc(tp elastictransport.Interface) NewPutSettings { } } -// Updates the cluster settings. +// Update the cluster settings. +// Configure and update dynamic settings on a running cluster. +// You can also configure dynamic settings locally on an unstarted or shut down +// node in `elasticsearch.yml`. +// +// Updates made with this API can be persistent, which apply across cluster +// restarts, or transient, which reset after a cluster restart. +// You can also reset transient or persistent settings by assigning them a null +// value. 
+// +// If you configure the same setting using multiple methods, Elasticsearch +// applies the settings in following order of precedence: 1) Transient setting; +// 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting +// value. +// For example, you can apply a transient setting to override a persistent +// setting or `elasticsearch.yml` setting. +// However, a change to an `elasticsearch.yml` setting will not override a +// defined transient or persistent setting. +// +// TIP: In Elastic Cloud, use the user settings feature to configure all cluster +// settings. This method automatically rejects unsafe settings that could break +// your cluster. +// If you run Elasticsearch on your own hardware, use this API to configure +// dynamic cluster settings. +// Only use `elasticsearch.yml` for static cluster settings and node settings. +// The API doesn’t require a restart and ensures a setting’s value is the same +// on all nodes. +// +// WARNING: Transient cluster settings are no longer recommended. Use persistent +// cluster settings instead. +// If a cluster becomes unstable, transient settings can clear unexpectedly, +// resulting in a potentially undesired cluster configuration. 
// 
// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html func New(tp elastictransport.Interface) *PutSettings { @@ -83,8 +145,6 @@ func New(tp elastictransport.Interface) *PutSettings { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -362,16 +422,58 @@ // API name: persistent func (r *PutSettings) Persistent(persistent map[string]json.RawMessage) *PutSettings { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Persistent = persistent + return r +} + +func (r *PutSettings) AddPersistent(key string, value json.RawMessage) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + var tmp map[string]json.RawMessage + if r.req.Persistent == nil { + tmp = make(map[string]json.RawMessage) + } else { + tmp = r.req.Persistent + } + + tmp[key] = value + + r.req.Persistent = tmp return r } // API name: transient func (r *PutSettings) Transient(transient map[string]json.RawMessage) *PutSettings { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Transient = transient + return r +} + +func (r *PutSettings) AddTransient(key string, value json.RawMessage) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Transient == nil { + tmp = make(map[string]json.RawMessage) + } else { + tmp = r.req.Transient + } + + tmp[key] = value + r.req.Transient = tmp return r } diff --git a/typedapi/cluster/putsettings/request.go b/typedapi/cluster/putsettings/request.go index d04547d6bc..ce2abad1ae 100644 --- a/typedapi/cluster/putsettings/request.go +++ 
b/typedapi/cluster/putsettings/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putsettings @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package putsettings // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/put_settings/ClusterPutSettingsRequest.ts#L25-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/put_settings/ClusterPutSettingsRequest.ts#L25-L67 type Request struct { Persistent map[string]json.RawMessage `json:"persistent,omitempty"` Transient map[string]json.RawMessage `json:"transient,omitempty"` diff --git a/typedapi/cluster/putsettings/response.go b/typedapi/cluster/putsettings/response.go index 236c911570..ad3b11dd1b 100644 --- a/typedapi/cluster/putsettings/response.go +++ b/typedapi/cluster/putsettings/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putsettings @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putsettings // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/put_settings/ClusterPutSettingsResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/put_settings/ClusterPutSettingsResponse.ts#L23-L29 type Response struct { Acknowledged bool `json:"acknowledged"` Persistent map[string]json.RawMessage `json:"persistent"` diff --git a/typedapi/cluster/remoteinfo/remote_info.go b/typedapi/cluster/remoteinfo/remote_info.go index af64ba0fea..3f11e47528 100644 --- a/typedapi/cluster/remoteinfo/remote_info.go +++ b/typedapi/cluster/remoteinfo/remote_info.go @@ -16,11 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// The cluster remote info API allows you to retrieve all of the configured -// remote cluster information. It returns connection and endpoint information -// keyed by the configured remote cluster alias. +// Get remote cluster information. +// +// Get information about configured remote clusters. +// The API returns connection and endpoint information keyed by the configured +// remote cluster alias. +// +// > info +// > This API returns information that reflects current state on the local +// cluster. 
+// > The `connected` field does not necessarily reflect whether a remote cluster +// is down or unavailable, only whether there is currently an open connection to +// it. +// > Elasticsearch does not spontaneously try to reconnect to a disconnected +// remote cluster. +// > To trigger a reconnection, attempt a cross-cluster search, ES|QL +// cross-cluster search, or try the [resolve cluster +// endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). package remoteinfo import ( @@ -70,9 +84,23 @@ func NewRemoteInfoFunc(tp elastictransport.Interface) NewRemoteInfo { } } -// The cluster remote info API allows you to retrieve all of the configured -// remote cluster information. It returns connection and endpoint information -// keyed by the configured remote cluster alias. +// Get remote cluster information. +// +// Get information about configured remote clusters. +// The API returns connection and endpoint information keyed by the configured +// remote cluster alias. +// +// > info +// > This API returns information that reflects current state on the local +// cluster. +// > The `connected` field does not necessarily reflect whether a remote cluster +// is down or unavailable, only whether there is currently an open connection to +// it. +// > Elasticsearch does not spontaneously try to reconnect to a disconnected +// remote cluster. +// > To trigger a reconnection, attempt a cross-cluster search, ES|QL +// cross-cluster search, or try the [resolve cluster +// endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-remote-info.html func New(tp elastictransport.Interface) *RemoteInfo { diff --git a/typedapi/cluster/remoteinfo/response.go b/typedapi/cluster/remoteinfo/response.go index b83cda543c..3451ac5971 100644 --- a/typedapi/cluster/remoteinfo/response.go +++ b/typedapi/cluster/remoteinfo/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package remoteinfo @@ -32,7 +32,7 @@ import ( // Response holds the response body struct for the package remoteinfo // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L24-L27 type Response map[string]types.ClusterRemoteInfo diff --git a/typedapi/cluster/reroute/request.go b/typedapi/cluster/reroute/request.go index be7cdb669d..6e9c933e15 100644 --- a/typedapi/cluster/reroute/request.go +++ b/typedapi/cluster/reroute/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package reroute @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package reroute // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/reroute/ClusterRerouteRequest.ts#L25-L70 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/reroute/ClusterRerouteRequest.ts#L25-L91 type Request struct { // Commands Defines the commands to perform. diff --git a/typedapi/cluster/reroute/reroute.go b/typedapi/cluster/reroute/reroute.go index 4de201cafb..26f35ace2b 100644 --- a/typedapi/cluster/reroute/reroute.go +++ b/typedapi/cluster/reroute/reroute.go @@ -16,9 +16,37 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Allows to manually change the allocation of individual shards in the cluster. +// Reroute the cluster. +// Manually change the allocation of individual shards in the cluster. +// For example, a shard can be moved from one node to another explicitly, an +// allocation can be canceled, and an unassigned shard can be explicitly +// allocated to a specific node. +// +// It is important to note that after processing any reroute commands +// Elasticsearch will perform rebalancing as normal (respecting the values of +// settings such as `cluster.routing.rebalance.enable`) in order to remain in a +// balanced state. 
+// For example, if the requested allocation includes moving a shard from node1 +// to node2 then this may cause a shard to be moved from node2 back to node1 to +// even things out. +// +// The cluster can be set to disable allocations using the +// `cluster.routing.allocation.enable` setting. +// If allocations are disabled then the only allocations that will be performed +// are explicit ones given using the reroute command, and consequent allocations +// due to rebalancing. +// +// The cluster will attempt to allocate a shard a maximum of +// `index.allocation.max_retries` times in a row (defaults to `5`), before +// giving up and leaving the shard unallocated. +// This scenario can be caused by structural problems such as having an analyzer +// which refers to a stopwords file which doesn’t exist on all nodes. +// +// Once the problem has been corrected, allocation can be manually retried by +// calling the reroute API with the `?retry_failed` URI query parameter, which +// will attempt a single retry round for these shards. package reroute import ( @@ -73,7 +101,35 @@ func NewRerouteFunc(tp elastictransport.Interface) NewReroute { } } -// Allows to manually change the allocation of individual shards in the cluster. +// Reroute the cluster. +// Manually change the allocation of individual shards in the cluster. +// For example, a shard can be moved from one node to another explicitly, an +// allocation can be canceled, and an unassigned shard can be explicitly +// allocated to a specific node. +// +// It is important to note that after processing any reroute commands +// Elasticsearch will perform rebalancing as normal (respecting the values of +// settings such as `cluster.routing.rebalance.enable`) in order to remain in a +// balanced state. +// For example, if the requested allocation includes moving a shard from node1 +// to node2 then this may cause a shard to be moved from node2 back to node1 to +// even things out. 
+// +// The cluster can be set to disable allocations using the +// `cluster.routing.allocation.enable` setting. +// If allocations are disabled then the only allocations that will be performed +// are explicit ones given using the reroute command, and consequent allocations +// due to rebalancing. +// +// The cluster will attempt to allocate a shard a maximum of +// `index.allocation.max_retries` times in a row (defaults to `5`), before +// giving up and leaving the shard unallocated. +// This scenario can be caused by structural problems such as having an analyzer +// which refers to a stopwords file which doesn’t exist on all nodes. +// +// Once the problem has been corrected, allocation can be manually retried by +// calling the reroute API with the `?retry_failed` URI query parameter, which +// will attempt a single retry round for these shards. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html func New(tp elastictransport.Interface) *Reroute { @@ -83,8 +139,6 @@ func New(tp elastictransport.Interface) *Reroute { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -292,8 +346,11 @@ func (r *Reroute) Header(key, value string) *Reroute { return r } -// DryRun If true, then the request simulates the operation only and returns the -// resulting state. +// DryRun If true, then the request simulates the operation. +// It will calculate the result of applying the commands to the current cluster +// state and return the resulting cluster state after the commands (and +// rebalancing) have been applied; it will not actually perform the requested +// changes. 
// API name: dry_run func (r *Reroute) DryRun(dryrun bool) *Reroute { r.values.Set("dry_run", strconv.FormatBool(dryrun)) @@ -302,7 +359,7 @@ func (r *Reroute) DryRun(dryrun bool) *Reroute { } // Explain If true, then the response contains an explanation of why the commands can or -// cannot be executed. +// cannot run. // API name: explain func (r *Reroute) Explain(explain bool) *Reroute { r.values.Set("explain", strconv.FormatBool(explain)) @@ -389,10 +446,17 @@ func (r *Reroute) Pretty(pretty bool) *Reroute { return r } -// Commands Defines the commands to perform. +// Defines the commands to perform. // API name: commands -func (r *Reroute) Commands(commands ...types.Command) *Reroute { - r.req.Commands = commands +func (r *Reroute) Commands(commands ...types.CommandVariant) *Reroute { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range commands { + + r.req.Commands = append(r.req.Commands, *v.CommandCaster()) + } return r } diff --git a/typedapi/cluster/reroute/response.go b/typedapi/cluster/reroute/response.go index efdd1de7cf..d01b18fa8d 100644 --- a/typedapi/cluster/reroute/response.go +++ b/typedapi/cluster/reroute/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package reroute @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package reroute // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/reroute/ClusterRerouteResponse.ts#L23-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/reroute/ClusterRerouteResponse.ts#L23-L34 type Response struct { Acknowledged bool `json:"acknowledged"` Explanations []types.RerouteExplanation `json:"explanations,omitempty"` diff --git a/typedapi/cluster/state/response.go b/typedapi/cluster/state/response.go index d0d26f5060..fe53782621 100644 --- a/typedapi/cluster/state/response.go +++ b/typedapi/cluster/state/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package state @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package state // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/state/ClusterStateResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/state/ClusterStateResponse.ts#L22-L29 type Response = json.RawMessage diff --git a/typedapi/cluster/state/state.go b/typedapi/cluster/state/state.go index 72bd665f0a..aa54eb7a45 100644 --- a/typedapi/cluster/state/state.go +++ b/typedapi/cluster/state/state.go @@ -16,9 +16,40 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns a comprehensive information about the state of the cluster. +// Get the cluster state. +// Get comprehensive information about the state of the cluster. +// +// The cluster state is an internal data structure which keeps track of a +// variety of information needed by every node, including the identity and +// attributes of the other nodes in the cluster; cluster-wide settings; index +// metadata, including the mapping and settings for each index; the location and +// status of every shard copy in the cluster. +// +// The elected master node ensures that every node in the cluster has a copy of +// the same cluster state. +// This API lets you retrieve a representation of this internal state for +// debugging or diagnostic purposes. 
+// You may need to consult the Elasticsearch source code to determine the +// precise meaning of the response. +// +// By default the API will route requests to the elected master node since this +// node is the authoritative source of cluster states. +// You can also retrieve the cluster state held on the node handling the API +// request by adding the `?local=true` query parameter. +// +// Elasticsearch may need to expend significant effort to compute a response to +// this API in larger clusters, and the response may comprise a very large +// quantity of data. +// If you use this API repeatedly, your cluster may become unstable. +// +// WARNING: The response is a representation of an internal data structure. +// Its format is not subject to the same compatibility guarantees as other more +// stable APIs and may change from version to version. +// Do not query this API using external monitoring tools. +// Instead, obtain the information you require using other more stable cluster +// APIs. package state import ( @@ -78,7 +109,38 @@ func NewStateFunc(tp elastictransport.Interface) NewState { } } -// Returns a comprehensive information about the state of the cluster. +// Get the cluster state. +// Get comprehensive information about the state of the cluster. +// +// The cluster state is an internal data structure which keeps track of a +// variety of information needed by every node, including the identity and +// attributes of the other nodes in the cluster; cluster-wide settings; index +// metadata, including the mapping and settings for each index; the location and +// status of every shard copy in the cluster. +// +// The elected master node ensures that every node in the cluster has a copy of +// the same cluster state. +// This API lets you retrieve a representation of this internal state for +// debugging or diagnostic purposes. +// You may need to consult the Elasticsearch source code to determine the +// precise meaning of the response. 
+// +// By default the API will route requests to the elected master node since this +// node is the authoritative source of cluster states. +// You can also retrieve the cluster state held on the node handling the API +// request by adding the `?local=true` query parameter. +// +// Elasticsearch may need to expend significant effort to compute a response to +// this API in larger clusters, and the response may comprise a very large +// quantity of data. +// If you use this API repeatedly, your cluster may become unstable. +// +// WARNING: The response is a representation of an internal data structure. +// Its format is not subject to the same compatibility guarantees as other more +// stable APIs and may change from version to version. +// Do not query this API using external monitoring tools. +// Instead, obtain the information you require using other more stable cluster +// APIs. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html func New(tp elastictransport.Interface) *State { diff --git a/typedapi/cluster/stats/response.go b/typedapi/cluster/stats/response.go index 728e8a1d7f..b4845e4602 100644 --- a/typedapi/cluster/stats/response.go +++ b/typedapi/cluster/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stats @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/ClusterStatsResponse.ts#L53-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/ClusterStatsResponse.ts#L53-L55 type Response struct { // ClusterName Name of the cluster, based on the cluster name setting. diff --git a/typedapi/cluster/stats/stats.go b/typedapi/cluster/stats/stats.go index 13335e3891..7eb72510ad 100644 --- a/typedapi/cluster/stats/stats.go +++ b/typedapi/cluster/stats/stats.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns cluster statistics. -// It returns basic index metrics (shard numbers, store size, memory usage) and +// Get cluster statistics. +// Get basic index metrics (shard numbers, store size, memory usage) and // information about the current nodes that form the cluster (number, roles, os, // jvm versions, memory usage, cpu and installed plugins). package stats @@ -77,8 +77,8 @@ func NewStatsFunc(tp elastictransport.Interface) NewStats { } } -// Returns cluster statistics. -// It returns basic index metrics (shard numbers, store size, memory usage) and +// Get cluster statistics. 
+// Get basic index metrics (shard numbers, store size, memory usage) and // information about the current nodes that form the cluster (number, roles, os, // jvm versions, memory usage, cpu and installed plugins). // @@ -313,10 +313,10 @@ func (r *Stats) NodeId(nodeid string) *Stats { return r } -// FlatSettings If `true`, returns settings in flat format. -// API name: flat_settings -func (r *Stats) FlatSettings(flatsettings bool) *Stats { - r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) +// IncludeRemotes Include remote cluster data into the response +// API name: include_remotes +func (r *Stats) IncludeRemotes(includeremotes bool) *Stats { + r.values.Set("include_remotes", strconv.FormatBool(includeremotes)) return r } diff --git a/typedapi/connector/checkin/check_in.go b/typedapi/connector/checkin/check_in.go index f3f10a66f4..b4bf467ac2 100644 --- a/typedapi/connector/checkin/check_in.go +++ b/typedapi/connector/checkin/check_in.go @@ -16,10 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the last_seen field in the connector, and sets it to current -// timestamp +// Check in a connector. +// +// Update the `last_seen` field in the connector and set it to the current +// timestamp. package checkin import ( @@ -77,8 +79,10 @@ func NewCheckInFunc(tp elastictransport.Interface) NewCheckIn { } } -// Updates the last_seen field in the connector, and sets it to current -// timestamp +// Check in a connector. +// +// Update the `last_seen` field in the connector and set it to the current +// timestamp. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/check-in-connector-api.html func New(tp elastictransport.Interface) *CheckIn { diff --git a/typedapi/connector/checkin/response.go b/typedapi/connector/checkin/response.go index 033157edae..d3a33c7abc 100644 --- a/typedapi/connector/checkin/response.go +++ b/typedapi/connector/checkin/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package checkin @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package checkin // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/check_in/ConnectorCheckInResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/check_in/ConnectorCheckInResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/delete/delete.go b/typedapi/connector/delete/delete.go index 6fb57a18ff..9a3fb70b56 100644 --- a/typedapi/connector/delete/delete.go +++ b/typedapi/connector/delete/delete.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes a connector. +// Delete a connector. +// +// Removes a connector and associated sync jobs. +// This is a destructive action that is not recoverable. 
+// NOTE: This action doesn’t delete any API keys, ingest pipelines, or data +// indices associated with the connector. +// These need to be removed manually. package delete import ( @@ -76,7 +82,13 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } } -// Deletes a connector. +// Delete a connector. +// +// Removes a connector and associated sync jobs. +// This is a destructive action that is not recoverable. +// NOTE: This action doesn’t delete any API keys, ingest pipelines, or data +// indices associated with the connector. +// These need to be removed manually. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-connector-api.html func New(tp elastictransport.Interface) *Delete { diff --git a/typedapi/connector/delete/response.go b/typedapi/connector/delete/response.go index d336937769..91d9f0d6e6 100644 --- a/typedapi/connector/delete/response.go +++ b/typedapi/connector/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/delete/ConnectorDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/delete/ConnectorDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/connector/get/get.go b/typedapi/connector/get/get.go index 6083207b4e..bb46f80164 100644 --- a/typedapi/connector/get/get.go +++ b/typedapi/connector/get/get.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves a connector. +// Get a connector. +// +// Get the details about a connector. package get import ( @@ -76,7 +78,9 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } } -// Retrieves a connector. +// Get a connector. +// +// Get the details about a connector. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-connector-api.html func New(tp elastictransport.Interface) *Get { diff --git a/typedapi/connector/get/response.go b/typedapi/connector/get/response.go index acb90b31a9..ebaef1c4e2 100644 --- a/typedapi/connector/get/response.go +++ b/typedapi/connector/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package get @@ -35,7 +35,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/get/ConnectorGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/get/ConnectorGetResponse.ts#L22-L24 type Response struct { ApiKeyId *string `json:"api_key_id,omitempty"` ApiKeySecretId *string `json:"api_key_secret_id,omitempty"` diff --git a/typedapi/connector/lastsync/last_sync.go b/typedapi/connector/lastsync/last_sync.go index cd6888cef1..d950e76a42 100644 --- a/typedapi/connector/lastsync/last_sync.go +++ b/typedapi/connector/lastsync/last_sync.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates last sync stats in the connector document +// Update the connector last sync stats. +// +// Update the fields related to the last sync of a connector. +// This action is used for analytics and monitoring. package lastsync import ( @@ -82,7 +85,10 @@ func NewLastSyncFunc(tp elastictransport.Interface) NewLastSync { } } -// Updates last sync stats in the connector document +// Update the connector last sync stats. +// +// Update the fields related to the last sync of a connector. +// This action is used for analytics and monitoring. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-last-sync-api.html func New(tp elastictransport.Interface) *LastSync { @@ -92,8 +98,6 @@ func New(tp elastictransport.Interface) *LastSync { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -362,6 +366,10 @@ func (r *LastSync) Pretty(pretty bool) *LastSync { // API name: last_access_control_sync_error func (r *LastSync) LastAccessControlSyncError(lastaccesscontrolsyncerror string) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LastAccessControlSyncError = &lastaccesscontrolsyncerror @@ -369,21 +377,33 @@ func (r *LastSync) LastAccessControlSyncError(lastaccesscontrolsyncerror string) } // API name: last_access_control_sync_scheduled_at -func (r *LastSync) LastAccessControlSyncScheduledAt(datetime types.DateTime) *LastSync { - r.req.LastAccessControlSyncScheduledAt = datetime +func (r *LastSync) LastAccessControlSyncScheduledAt(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastAccessControlSyncScheduledAt = *datetime.DateTimeCaster() return r } // API name: last_access_control_sync_status func (r *LastSync) LastAccessControlSyncStatus(lastaccesscontrolsyncstatus syncstatus.SyncStatus) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LastAccessControlSyncStatus = &lastaccesscontrolsyncstatus - return r } // API name: last_deleted_document_count func (r *LastSync) LastDeletedDocumentCount(lastdeleteddocumentcount int64) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LastDeletedDocumentCount = &lastdeleteddocumentcount @@ -391,14 
+411,23 @@ func (r *LastSync) LastDeletedDocumentCount(lastdeleteddocumentcount int64) *Las } // API name: last_incremental_sync_scheduled_at -func (r *LastSync) LastIncrementalSyncScheduledAt(datetime types.DateTime) *LastSync { - r.req.LastIncrementalSyncScheduledAt = datetime +func (r *LastSync) LastIncrementalSyncScheduledAt(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastIncrementalSyncScheduledAt = *datetime.DateTimeCaster() return r } // API name: last_indexed_document_count func (r *LastSync) LastIndexedDocumentCount(lastindexeddocumentcount int64) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LastIndexedDocumentCount = &lastindexeddocumentcount @@ -406,14 +435,23 @@ func (r *LastSync) LastIndexedDocumentCount(lastindexeddocumentcount int64) *Las } // API name: last_seen -func (r *LastSync) LastSeen(datetime types.DateTime) *LastSync { - r.req.LastSeen = datetime +func (r *LastSync) LastSeen(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastSeen = *datetime.DateTimeCaster() return r } // API name: last_sync_error func (r *LastSync) LastSyncError(lastsyncerror string) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LastSyncError = &lastsyncerror @@ -421,32 +459,45 @@ func (r *LastSync) LastSyncError(lastsyncerror string) *LastSync { } // API name: last_sync_scheduled_at -func (r *LastSync) LastSyncScheduledAt(datetime types.DateTime) *LastSync { - r.req.LastSyncScheduledAt = datetime +func (r *LastSync) LastSyncScheduledAt(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + 
+ r.req.LastSyncScheduledAt = *datetime.DateTimeCaster() return r } // API name: last_sync_status func (r *LastSync) LastSyncStatus(lastsyncstatus syncstatus.SyncStatus) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LastSyncStatus = &lastsyncstatus - return r } // API name: last_synced -func (r *LastSync) LastSynced(datetime types.DateTime) *LastSync { - r.req.LastSynced = datetime +func (r *LastSync) LastSynced(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastSynced = *datetime.DateTimeCaster() return r } // API name: sync_cursor -// -// synccursor should be a json.RawMessage or a structure -// if a structure is provided, the client will defer a json serialization -// prior to sending the payload to Elasticsearch. func (r *LastSync) SyncCursor(synccursor any) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } switch casted := synccursor.(type) { case json.RawMessage: r.req.SyncCursor = casted @@ -460,6 +511,5 @@ func (r *LastSync) SyncCursor(synccursor any) *LastSync { return nil }) } - return r } diff --git a/typedapi/connector/lastsync/request.go b/typedapi/connector/lastsync/request.go index 7996a4d5de..bae4df2b6d 100644 --- a/typedapi/connector/lastsync/request.go +++ b/typedapi/connector/lastsync/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package lastsync @@ -34,7 +34,7 @@ import ( // Request holds the request body struct for the package lastsync // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/last_sync/ConnectorUpdateLastSyncRequest.ts#L26-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/last_sync/ConnectorUpdateLastSyncRequest.ts#L26-L66 type Request struct { LastAccessControlSyncError *string `json:"last_access_control_sync_error,omitempty"` LastAccessControlSyncScheduledAt types.DateTime `json:"last_access_control_sync_scheduled_at,omitempty"` diff --git a/typedapi/connector/lastsync/response.go b/typedapi/connector/lastsync/response.go index ef854eaf93..4493e10f65 100644 --- a/typedapi/connector/lastsync/response.go +++ b/typedapi/connector/lastsync/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package lastsync @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package lastsync // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/last_sync/ConnectorUpdateLastSyncResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/last_sync/ConnectorUpdateLastSyncResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/list/list.go b/typedapi/connector/list/list.go index 79af582f01..8f54bfa917 100644 --- a/typedapi/connector/list/list.go +++ b/typedapi/connector/list/list.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns existing connectors. +// Get all connectors. +// +// Get information about all connectors. package list import ( @@ -68,7 +70,9 @@ func NewListFunc(tp elastictransport.Interface) NewList { } } -// Returns existing connectors. +// Get all connectors. +// +// Get information about all connectors. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-connector-api.html func New(tp elastictransport.Interface) *List { diff --git a/typedapi/connector/list/response.go b/typedapi/connector/list/response.go index a7da6d2845..7df3f2b75e 100644 --- a/typedapi/connector/list/response.go +++ b/typedapi/connector/list/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package list @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package list // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/list/ConnectorListResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/list/ConnectorListResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Results []types.Connector `json:"results"` diff --git a/typedapi/connector/post/post.go b/typedapi/connector/post/post.go index e356a03384..1df81c621e 100644 --- a/typedapi/connector/post/post.go +++ b/typedapi/connector/post/post.go @@ -16,9 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a connector. +// Create a connector. +// +// Connectors are Elasticsearch integrations that bring content from third-party +// data sources, which can be deployed on Elastic Cloud or hosted on your own +// infrastructure. +// Elastic managed connectors (Native connectors) are a managed service on +// Elastic Cloud. +// Self-managed connectors (Connector clients) are self-managed on your +// infrastructure. package post import ( @@ -73,7 +81,15 @@ func NewPostFunc(tp elastictransport.Interface) NewPost { } } -// Creates a connector. +// Create a connector. 
+// +// Connectors are Elasticsearch integrations that bring content from third-party +// data sources, which can be deployed on Elastic Cloud or hosted on your own +// infrastructure. +// Elastic managed connectors (Native connectors) are a managed service on +// Elastic Cloud. +// Self-managed connectors (Connector clients) are self-managed on your +// infrastructure. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-api.html func New(tp elastictransport.Interface) *Post { @@ -83,8 +99,6 @@ func New(tp elastictransport.Interface) *Post { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -336,6 +350,10 @@ func (r *Post) Pretty(pretty bool) *Post { // API name: description func (r *Post) Description(description string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description @@ -344,6 +362,11 @@ func (r *Post) Description(description string) *Post { // API name: index_name func (r *Post) IndexName(indexname string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexName = &indexname return r @@ -351,6 +374,11 @@ func (r *Post) IndexName(indexname string) *Post { // API name: is_native func (r *Post) IsNative(isnative bool) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IsNative = &isnative return r @@ -358,6 +386,10 @@ func (r *Post) IsNative(isnative bool) *Post { // API name: language func (r *Post) Language(language string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Language = &language @@ -366,6 +398,10 @@ func (r *Post) Language(language string) *Post { // API name: name func (r *Post) Name(name 
string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Name = &name @@ -374,6 +410,10 @@ func (r *Post) Name(name string) *Post { // API name: service_type func (r *Post) ServiceType(servicetype string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ServiceType = &servicetype diff --git a/typedapi/connector/post/request.go b/typedapi/connector/post/request.go index b53c926e3e..7f96b8f957 100644 --- a/typedapi/connector/post/request.go +++ b/typedapi/connector/post/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package post @@ -31,7 +31,7 @@ import ( // Request holds the request body struct for the package post // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/post/ConnectorPostRequest.ts#L22-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/post/ConnectorPostRequest.ts#L22-L52 type Request struct { Description *string `json:"description,omitempty"` IndexName *string `json:"index_name,omitempty"` diff --git a/typedapi/connector/post/response.go b/typedapi/connector/post/response.go index c1d202b2c7..ec88b834e3 100644 --- a/typedapi/connector/post/response.go +++ b/typedapi/connector/post/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package post @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package post // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/post/ConnectorPostResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/post/ConnectorPostResponse.ts#L23-L28 type Response struct { Id string `json:"id"` Result result.Result `json:"result"` diff --git a/typedapi/connector/put/put.go b/typedapi/connector/put/put.go index f49bb3c212..d41265fb65 100644 --- a/typedapi/connector/put/put.go +++ b/typedapi/connector/put/put.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates or updates a connector. +// Create or update a connector. package put import ( @@ -79,7 +79,7 @@ func NewPutFunc(tp elastictransport.Interface) NewPut { } } -// Creates or updates a connector. +// Create or update a connector. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-api.html func New(tp elastictransport.Interface) *Put { @@ -89,8 +89,6 @@ func New(tp elastictransport.Interface) *Put { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -363,6 +361,10 @@ func (r *Put) Pretty(pretty bool) *Put { // API name: description func (r *Put) Description(description string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description @@ -371,6 +373,11 @@ func (r *Put) Description(description string) *Put { // API name: index_name func (r *Put) IndexName(indexname string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexName = &indexname return r @@ -378,6 +385,11 @@ func (r *Put) IndexName(indexname string) *Put { // API name: is_native func (r *Put) IsNative(isnative bool) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IsNative = &isnative return r @@ -385,6 +397,10 @@ func (r *Put) IsNative(isnative bool) *Put { // API name: language func (r *Put) Language(language string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Language = &language @@ -393,6 +409,10 @@ func (r *Put) Language(language string) *Put { // API name: name func (r *Put) Name(name string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Name = &name @@ -401,6 +421,10 @@ func (r *Put) Name(name string) *Put { // API name: service_type func (r *Put) ServiceType(servicetype string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } 
r.req.ServiceType = &servicetype diff --git a/typedapi/connector/put/request.go b/typedapi/connector/put/request.go index 5345bf652f..ee6d5c5a61 100644 --- a/typedapi/connector/put/request.go +++ b/typedapi/connector/put/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package put @@ -31,7 +31,7 @@ import ( // Request holds the request body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/put/ConnectorPutRequest.ts#L22-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/put/ConnectorPutRequest.ts#L22-L58 type Request struct { Description *string `json:"description,omitempty"` IndexName *string `json:"index_name,omitempty"` diff --git a/typedapi/connector/put/response.go b/typedapi/connector/put/response.go index f89b03661c..aacae14966 100644 --- a/typedapi/connector/put/response.go +++ b/typedapi/connector/put/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package put @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/put/ConnectorPutResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/put/ConnectorPutResponse.ts#L23-L28 type Response struct { Id string `json:"id"` Result result.Result `json:"result"` diff --git a/typedapi/connector/secretpost/secret_post.go b/typedapi/connector/secretpost/secret_post.go index b72f8696c5..9674b05a39 100644 --- a/typedapi/connector/secretpost/secret_post.go +++ b/typedapi/connector/secretpost/secret_post.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Creates a secret for a Connector. package secretpost diff --git a/typedapi/connector/syncjobcancel/response.go b/typedapi/connector/syncjobcancel/response.go index 6e69300995..b7d8d675b7 100644 --- a/typedapi/connector/syncjobcancel/response.go +++ b/typedapi/connector/syncjobcancel/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package syncjobcancel @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package syncjobcancel // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/sync_job_cancel/SyncJobCancelResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/sync_job_cancel/SyncJobCancelResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/syncjobcancel/sync_job_cancel.go b/typedapi/connector/syncjobcancel/sync_job_cancel.go index 0eba100d15..9cfff8af84 100644 --- a/typedapi/connector/syncjobcancel/sync_job_cancel.go +++ b/typedapi/connector/syncjobcancel/sync_job_cancel.go @@ -16,9 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Cancels a connector sync job. +// Cancel a connector sync job. +// +// Cancel a connector sync job, which sets the status to cancelling and updates +// `cancellation_requested_at` to the current time. +// The connector service is then responsible for setting the status of connector +// sync jobs to cancelled. package syncjobcancel import ( @@ -76,7 +81,12 @@ func NewSyncJobCancelFunc(tp elastictransport.Interface) NewSyncJobCancel { } } -// Cancels a connector sync job. +// Cancel a connector sync job. +// +// Cancel a connector sync job, which sets the status to cancelling and updates +// `cancellation_requested_at` to the current time. 
+// The connector service is then responsible for setting the status of connector +// sync jobs to cancelled. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/cancel-connector-sync-job-api.html func New(tp elastictransport.Interface) *SyncJobCancel { diff --git a/typedapi/connector/syncjobcheckin/response.go b/typedapi/connector/syncjobcheckin/response.go new file mode 100644 index 0000000000..840c513bfd --- /dev/null +++ b/typedapi/connector/syncjobcheckin/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package syncjobcheckin + +// Response holds the response body struct for the package syncjobcheckin +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/sync_job_check_in/SyncJobCheckInResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/connector/syncjobcheckin/sync_job_check_in.go b/typedapi/connector/syncjobcheckin/sync_job_check_in.go new file mode 100644 index 0000000000..36381696de --- /dev/null +++ b/typedapi/connector/syncjobcheckin/sync_job_check_in.go @@ -0,0 +1,360 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Check in a connector sync job. +// Check in a connector sync job and set the `last_seen` field to the current +// time before updating it in the internal index. 
//
// To sync data using self-managed connectors, you need to deploy the Elastic
// connector service on your own infrastructure.
// This service runs automatically on Elastic Cloud for Elastic managed
// connectors.
package syncjobcheckin

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
)

const (
	// connectorsyncjobidMask flags the connectorsyncjobid path parameter as
	// set in SyncJobCheckIn.paramSet.
	connectorsyncjobidMask = iota + 1
)

// ErrBuildPath is returned in case of missing parameters within the build of the request.
var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")

// SyncJobCheckIn is a fluent request builder for the connector sync-job
// check-in endpoint (PUT /_connector/_sync_job/{id}/_check_in).
type SyncJobCheckIn struct {
	transport elastictransport.Interface

	headers http.Header
	values  url.Values
	path    url.URL

	// raw is the request body reader, if one was provided.
	raw io.Reader

	// paramSet is a bit mask of the path parameters that have been set.
	paramSet int

	connectorsyncjobid string

	// spanStarted records whether an instrumentation span was already opened
	// by a calling method (Do/IsSuccess), so Perform does not open a second one.
	spanStarted bool

	instrument elastictransport.Instrumentation
}

// NewSyncJobCheckIn type alias for index.
type NewSyncJobCheckIn func(connectorsyncjobid string) *SyncJobCheckIn

// NewSyncJobCheckInFunc returns a new instance of SyncJobCheckIn with the provided transport.
// Used in the index of the library this allows to retrieve every apis in once place.
func NewSyncJobCheckInFunc(tp elastictransport.Interface) NewSyncJobCheckIn {
	return func(connectorsyncjobid string) *SyncJobCheckIn {
		n := New(tp)

		n._connectorsyncjobid(connectorsyncjobid)

		return n
	}
}

// Check in a connector sync job.
// Check in a connector sync job and set the `last_seen` field to the current
// time before updating it in the internal index.
//
// To sync data using self-managed connectors, you need to deploy the Elastic
// connector service on your own infrastructure.
// This service runs automatically on Elastic Cloud for Elastic managed
// connectors.
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/check-in-connector-sync-job-api.html +func New(tp elastictransport.Interface) *SyncJobCheckIn { + r := &SyncJobCheckIn{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *SyncJobCheckIn) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + path.WriteString("/") + path.WriteString("_check_in") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SyncJobCheckIn) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_check_in") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_check_in") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_check_in", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_check_in") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobCheckIn query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjobcheckin.Response +func (r SyncJobCheckIn) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_check_in") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + 
defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r SyncJobCheckIn) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_check_in") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the SyncJobCheckIn query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the SyncJobCheckIn headers map. 
+func (r *SyncJobCheckIn) Header(key, value string) *SyncJobCheckIn { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier of the connector sync job to be checked in. +// API Name: connectorsyncjobid +func (r *SyncJobCheckIn) _connectorsyncjobid(connectorsyncjobid string) *SyncJobCheckIn { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobCheckIn) ErrorTrace(errortrace bool) *SyncJobCheckIn { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SyncJobCheckIn) FilterPath(filterpaths ...string) *SyncJobCheckIn { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobCheckIn) Human(human bool) *SyncJobCheckIn { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *SyncJobCheckIn) Pretty(pretty bool) *SyncJobCheckIn { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/connector/syncjobclaim/request.go b/typedapi/connector/syncjobclaim/request.go new file mode 100644 index 0000000000..2d91dc0e90 --- /dev/null +++ b/typedapi/connector/syncjobclaim/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package syncjobclaim + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package syncjobclaim +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/sync_job_claim/SyncJobClaimRequest.ts#L23-L61 +type Request struct { + + // SyncCursor The cursor object from the last incremental sync job. + // This should reference the `sync_cursor` field in the connector state for + // which the job runs. 
+ SyncCursor json.RawMessage `json:"sync_cursor,omitempty"` + // WorkerHostname The host name of the current system that will run the job. + WorkerHostname string `json:"worker_hostname"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Syncjobclaim request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/connector/syncjobclaim/response.go b/typedapi/connector/syncjobclaim/response.go new file mode 100644 index 0000000000..9cbc21beb9 --- /dev/null +++ b/typedapi/connector/syncjobclaim/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d

package syncjobclaim

// Response holds the response body struct for the package syncjobclaim
//
// A successful claim returns an empty body; this type is the concrete decode
// target used by the generated Do method.
//
// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/sync_job_claim/SyncJobClaimResponse.ts#L20-L22
type Response struct {
}

// NewResponse returns a pointer to a zero-value Response.
func NewResponse() *Response {
	r := &Response{}
	return r
}

// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d

// Claim a connector sync job.
// This action updates the job status to `in_progress` and sets the `last_seen`
// and `started_at` timestamps to the current time.
// Additionally, it can set the `sync_cursor` property for the sync job.
//
// This API is not intended for direct connector management by users.
// It supports the implementation of services that utilize the connector
// protocol to communicate with Elasticsearch.
//
// To sync data using self-managed connectors, you need to deploy the Elastic
// connector service on your own infrastructure.
// This service runs automatically on Elastic Cloud for Elastic managed
// connectors.
package syncjobclaim

import (
	gobytes "bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v8/typedapi/types"
)

const (
	// connectorsyncjobidMask flags the connectorsyncjobid path parameter as
	// set in SyncJobClaim.paramSet.
	connectorsyncjobidMask = iota + 1
)

// ErrBuildPath is returned in case of missing parameters within the build of the request.
var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")

// SyncJobClaim is a fluent request builder for the connector sync-job claim
// endpoint (PUT /_connector/_sync_job/{id}/_claim).
type SyncJobClaim struct {
	transport elastictransport.Interface

	headers http.Header
	values  url.Values
	path    url.URL

	// raw, when set, takes precedence over req as the request body.
	raw io.Reader

	req *Request
	// deferred holds request mutations applied at HttpRequest build time
	// (e.g. lazy serialization of SyncCursor).
	deferred []func(request *Request) error
	buf      *gobytes.Buffer

	// paramSet is a bit mask of the path parameters that have been set.
	paramSet int

	connectorsyncjobid string

	// spanStarted records whether Do already opened an instrumentation span,
	// so Perform does not open a second one.
	spanStarted bool

	instrument elastictransport.Instrumentation
}

// NewSyncJobClaim type alias for index.
type NewSyncJobClaim func(connectorsyncjobid string) *SyncJobClaim

// NewSyncJobClaimFunc returns a new instance of SyncJobClaim with the provided transport.
// Used in the index of the library this allows to retrieve every apis in once place.
func NewSyncJobClaimFunc(tp elastictransport.Interface) NewSyncJobClaim {
	return func(connectorsyncjobid string) *SyncJobClaim {
		n := New(tp)

		n._connectorsyncjobid(connectorsyncjobid)

		return n
	}
}

// Claim a connector sync job.
// This action updates the job status to `in_progress` and sets the `last_seen`
// and `started_at` timestamps to the current time.
// Additionally, it can set the `sync_cursor` property for the sync job.
//
// This API is not intended for direct connector management by users.
// It supports the implementation of services that utilize the connector
// protocol to communicate with Elasticsearch.
//
// To sync data using self-managed connectors, you need to deploy the Elastic
// connector service on your own infrastructure.
// This service runs automatically on Elastic Cloud for Elastic managed
// connectors.
//
// https://www.elastic.co/guide/en/elasticsearch/reference/current/claim-connector-sync-job-api.html
func New(tp elastictransport.Interface) *SyncJobClaim {
	r := &SyncJobClaim{
		transport: tp,
		values:    make(url.Values),
		headers:   make(http.Header),

		buf: gobytes.NewBuffer(nil),
	}

	// Adopt the transport's instrumentation when it exposes one.
	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
			r.instrument = instrument
		}
	}

	return r
}

// Raw takes a json payload as input which is then passed to the http.Request
// If specified Raw takes precedence on Request method.
func (r *SyncJobClaim) Raw(raw io.Reader) *SyncJobClaim {
	r.raw = raw

	return r
}

// Request allows to set the request property with the appropriate payload.
func (r *SyncJobClaim) Request(req *Request) *SyncJobClaim {
	r.req = req

	return r
}

// HttpRequest returns the http.Request object built from the
// given parameters.
+func (r *SyncJobClaim) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SyncJobClaim: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + path.WriteString("/") + path.WriteString("_claim") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SyncJobClaim) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_claim") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_claim") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_claim", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_claim") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobClaim query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjobclaim.Response +func (r SyncJobClaim) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_claim") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + 
if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SyncJobClaim headers map. +func (r *SyncJobClaim) Header(key, value string) *SyncJobClaim { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier of the connector sync job. +// API Name: connectorsyncjobid +func (r *SyncJobClaim) _connectorsyncjobid(connectorsyncjobid string) *SyncJobClaim { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobClaim) ErrorTrace(errortrace bool) *SyncJobClaim { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *SyncJobClaim) FilterPath(filterpaths ...string) *SyncJobClaim { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobClaim) Human(human bool) *SyncJobClaim { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SyncJobClaim) Pretty(pretty bool) *SyncJobClaim { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The cursor object from the last incremental sync job. +// This should reference the `sync_cursor` field in the connector state for +// which the job runs. +// API name: sync_cursor +func (r *SyncJobClaim) SyncCursor(synccursor any) *SyncJobClaim { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + switch casted := synccursor.(type) { + case json.RawMessage: + r.req.SyncCursor = casted + default: + r.deferred = append(r.deferred, func(request *Request) error { + data, err := json.Marshal(synccursor) + if err != nil { + return err + } + r.req.SyncCursor = data + return nil + }) + } + return r +} + +// The host name of the current system that will run the job. 
+// API name: worker_hostname +func (r *SyncJobClaim) WorkerHostname(workerhostname string) *SyncJobClaim { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.WorkerHostname = workerhostname + + return r +} diff --git a/typedapi/connector/syncjobdelete/response.go b/typedapi/connector/syncjobdelete/response.go index eff1c4ab26..65697400f3 100644 --- a/typedapi/connector/syncjobdelete/response.go +++ b/typedapi/connector/syncjobdelete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package syncjobdelete // Response holds the response body struct for the package syncjobdelete // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/sync_job_delete/SyncJobDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/sync_job_delete/SyncJobDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/connector/syncjobdelete/sync_job_delete.go b/typedapi/connector/syncjobdelete/sync_job_delete.go index 7b31154493..b94180e65d 100644 --- a/typedapi/connector/syncjobdelete/sync_job_delete.go +++ b/typedapi/connector/syncjobdelete/sync_job_delete.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes a connector sync job. +// Delete a connector sync job. +// +// Remove a connector sync job and its associated data. +// This is a destructive action that is not recoverable. package syncjobdelete import ( @@ -76,7 +79,10 @@ func NewSyncJobDeleteFunc(tp elastictransport.Interface) NewSyncJobDelete { } } -// Deletes a connector sync job. +// Delete a connector sync job. +// +// Remove a connector sync job and its associated data. +// This is a destructive action that is not recoverable. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-connector-sync-job-api.html func New(tp elastictransport.Interface) *SyncJobDelete { diff --git a/typedapi/connector/syncjoberror/request.go b/typedapi/connector/syncjoberror/request.go new file mode 100644 index 0000000000..91d1105372 --- /dev/null +++ b/typedapi/connector/syncjoberror/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d

package syncjoberror

import (
	"encoding/json"
	"fmt"
)

// Request holds the request body struct for the package syncjoberror
//
// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/sync_job_error/SyncJobErrorRequest.ts#L23-L52
type Request struct {

	// Error The error for the connector sync job error field.
	Error string `json:"error"`
}

// NewRequest returns a pointer to a zero-value Request.
func NewRequest() *Request {
	r := &Request{}

	return r
}

// FromJSON allows to load an arbitrary json into the request structure
func (r *Request) FromJSON(data string) (*Request, error) {
	var req Request
	err := json.Unmarshal([]byte(data), &req)

	if err != nil {
		return nil, fmt.Errorf("could not deserialise json into Syncjoberror request: %w", err)
	}

	return &req, nil
}

// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d

package syncjoberror

// Response holds the response body struct for the package syncjoberror
//
// The endpoint returns an empty body on success; this type is the concrete
// decode target used by the generated Do method.
//
// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/sync_job_error/SyncJobErrorResponse.ts#L20-L22
type Response struct {
}

// NewResponse returns a pointer to a zero-value Response.
func NewResponse() *Response {
	r := &Response{}
	return r
}

// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d

// Set a connector sync job error.
+// Set the `error` field for a connector sync job and set its `status` to +// `error`. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +package syncjoberror + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + connectorsyncjobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobError struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorsyncjobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobError type alias for index. +type NewSyncJobError func(connectorsyncjobid string) *SyncJobError + +// NewSyncJobErrorFunc returns a new instance of SyncJobError with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSyncJobErrorFunc(tp elastictransport.Interface) NewSyncJobError { + return func(connectorsyncjobid string) *SyncJobError { + n := New(tp) + + n._connectorsyncjobid(connectorsyncjobid) + + return n + } +} + +// Set a connector sync job error. +// Set the `error` field for a connector sync job and set its `status` to +// `error`. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. 
+// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/set-connector-sync-job-error-api.html +func New(tp elastictransport.Interface) *SyncJobError { + r := &SyncJobError{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SyncJobError) Raw(raw io.Reader) *SyncJobError { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SyncJobError) Request(req *Request) *SyncJobError { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *SyncJobError) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SyncJobError: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + path.WriteString("/") + path.WriteString("_error") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SyncJobError) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_error") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_error") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_error", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_error") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobError query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjoberror.Response +func (r SyncJobError) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_error") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + 
if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SyncJobError headers map. +func (r *SyncJobError) Header(key, value string) *SyncJobError { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier for the connector sync job. +// API Name: connectorsyncjobid +func (r *SyncJobError) _connectorsyncjobid(connectorsyncjobid string) *SyncJobError { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobError) ErrorTrace(errortrace bool) *SyncJobError { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *SyncJobError) FilterPath(filterpaths ...string) *SyncJobError { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobError) Human(human bool) *SyncJobError { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SyncJobError) Pretty(pretty bool) *SyncJobError { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The error for the connector sync job error field. +// API name: error +func (r *SyncJobError) Error(error string) *SyncJobError { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Error = error + + return r +} diff --git a/typedapi/connector/syncjobget/response.go b/typedapi/connector/syncjobget/response.go index dedc920e03..ebddd2fdba 100644 --- a/typedapi/connector/syncjobget/response.go +++ b/typedapi/connector/syncjobget/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package syncjobget @@ -36,7 +36,7 @@ import ( // Response holds the response body struct for the package syncjobget // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/sync_job_get/SyncJobGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/sync_job_get/SyncJobGetResponse.ts#L22-L24 type Response struct { CancelationRequestedAt types.DateTime `json:"cancelation_requested_at,omitempty"` CanceledAt types.DateTime `json:"canceled_at,omitempty"` diff --git a/typedapi/connector/syncjobget/sync_job_get.go b/typedapi/connector/syncjobget/sync_job_get.go index a0afe0a5bb..18fbcf8fbf 100644 --- a/typedapi/connector/syncjobget/sync_job_get.go +++ b/typedapi/connector/syncjobget/sync_job_get.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves a connector sync job. +// Get a connector sync job. package syncjobget import ( @@ -76,7 +76,7 @@ func NewSyncJobGetFunc(tp elastictransport.Interface) NewSyncJobGet { } } -// Retrieves a connector sync job. +// Get a connector sync job. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-connector-sync-job-api.html func New(tp elastictransport.Interface) *SyncJobGet { diff --git a/typedapi/connector/syncjoblist/response.go b/typedapi/connector/syncjoblist/response.go index 6640453965..a3de62c112 100644 --- a/typedapi/connector/syncjoblist/response.go +++ b/typedapi/connector/syncjoblist/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package syncjoblist @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package syncjoblist // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/sync_job_list/SyncJobListResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/sync_job_list/SyncJobListResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Results []types.ConnectorSyncJob `json:"results"` diff --git a/typedapi/connector/syncjoblist/sync_job_list.go b/typedapi/connector/syncjoblist/sync_job_list.go index 4276279e0c..a435892793 100644 --- a/typedapi/connector/syncjoblist/sync_job_list.go +++ b/typedapi/connector/syncjoblist/sync_job_list.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Lists connector sync jobs. +// Get all connector sync jobs. 
+// +// Get information about all stored connector sync jobs listed by their creation +// date in ascending order. package syncjoblist import ( @@ -70,7 +73,10 @@ func NewSyncJobListFunc(tp elastictransport.Interface) NewSyncJobList { } } -// Lists connector sync jobs. +// Get all connector sync jobs. +// +// Get information about all stored connector sync jobs listed by their creation +// date in ascending order. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-connector-sync-jobs-api.html func New(tp elastictransport.Interface) *SyncJobList { diff --git a/typedapi/connector/syncjobpost/request.go b/typedapi/connector/syncjobpost/request.go index a9f5f8912f..87f2b99ac3 100644 --- a/typedapi/connector/syncjobpost/request.go +++ b/typedapi/connector/syncjobpost/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package syncjobpost @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package syncjobpost // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/sync_job_post/SyncJobPostRequest.ts#L23-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/sync_job_post/SyncJobPostRequest.ts#L23-L51 type Request struct { // Id The id of the associated connector diff --git a/typedapi/connector/syncjobpost/response.go b/typedapi/connector/syncjobpost/response.go index 338a98f77b..6002c560fb 100644 --- a/typedapi/connector/syncjobpost/response.go +++ b/typedapi/connector/syncjobpost/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package syncjobpost // Response holds the response body struct for the package syncjobpost // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/sync_job_post/SyncJobPostResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/sync_job_post/SyncJobPostResponse.ts#L22-L26 type Response struct { Id string `json:"id"` } diff --git a/typedapi/connector/syncjobpost/sync_job_post.go b/typedapi/connector/syncjobpost/sync_job_post.go index 8589a81793..b54d75a905 100644 --- a/typedapi/connector/syncjobpost/sync_job_post.go +++ b/typedapi/connector/syncjobpost/sync_job_post.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a connector sync job. +// Create a connector sync job. +// +// Create a connector sync job document in the internal index and initialize its +// counters and timestamps with default values. package syncjobpost import ( @@ -75,7 +78,10 @@ func NewSyncJobPostFunc(tp elastictransport.Interface) NewSyncJobPost { } } -// Creates a connector sync job. +// Create a connector sync job. +// +// Create a connector sync job document in the internal index and initialize its +// counters and timestamps with default values. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-sync-job-api.html func New(tp elastictransport.Interface) *SyncJobPost { @@ -85,8 +91,6 @@ func New(tp elastictransport.Interface) *SyncJobPost { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -338,9 +342,14 @@ func (r *SyncJobPost) Pretty(pretty bool) *SyncJobPost { return r } -// Id The id of the associated connector +// The id of the associated connector // API name: id func (r *SyncJobPost) Id(id string) *SyncJobPost { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Id = id return r @@ -348,14 +357,20 @@ func (r *SyncJobPost) Id(id string) *SyncJobPost { // API name: job_type func (r *SyncJobPost) JobType(jobtype syncjobtype.SyncJobType) *SyncJobPost { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.JobType = &jobtype - return r } // API name: trigger_method func (r *SyncJobPost) TriggerMethod(triggermethod syncjobtriggermethod.SyncJobTriggerMethod) *SyncJobPost { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TriggerMethod = &triggermethod - return r } diff --git a/typedapi/connector/syncjobupdatestats/request.go b/typedapi/connector/syncjobupdatestats/request.go new file mode 100644 index 0000000000..dbb5176daa --- /dev/null +++ b/typedapi/connector/syncjobupdatestats/request.go @@ -0,0 +1,161 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package syncjobupdatestats + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package syncjobupdatestats +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/sync_job_update_stats/SyncJobUpdateStatsRequest.ts#L24-L78 +type Request struct { + + // DeletedDocumentCount The number of documents the sync job deleted. + DeletedDocumentCount int64 `json:"deleted_document_count"` + // IndexedDocumentCount The number of documents the sync job indexed. + IndexedDocumentCount int64 `json:"indexed_document_count"` + // IndexedDocumentVolume The total size of the data (in MiB) the sync job indexed. + IndexedDocumentVolume int64 `json:"indexed_document_volume"` + // LastSeen The timestamp to use in the `last_seen` property for the connector sync job. + LastSeen types.Duration `json:"last_seen,omitempty"` + // Metadata The connector-specific metadata. + Metadata types.Metadata `json:"metadata,omitempty"` + // TotalDocumentCount The total number of documents in the target index after the sync job + // finished. 
+ TotalDocumentCount *int `json:"total_document_count,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Syncjobupdatestats request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deleted_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DeletedDocumentCount", err) + } + s.DeletedDocumentCount = value + case float64: + f := int64(v) + s.DeletedDocumentCount = f + } + + case "indexed_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedDocumentCount", err) + } + s.IndexedDocumentCount = value + case float64: + f := int64(v) + s.IndexedDocumentCount = f + } + + case "indexed_document_volume": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedDocumentVolume", err) + } + s.IndexedDocumentVolume = value + case float64: + f := int64(v) + s.IndexedDocumentVolume = f + } + + case "last_seen": + if err := dec.Decode(&s.LastSeen); err != nil { + return fmt.Errorf("%s | %w", "LastSeen", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + 
case "total_document_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalDocumentCount", err) + } + s.TotalDocumentCount = &value + case float64: + f := int(v) + s.TotalDocumentCount = &f + } + + } + } + return nil +} diff --git a/typedapi/connector/syncjobupdatestats/response.go b/typedapi/connector/syncjobupdatestats/response.go new file mode 100644 index 0000000000..ea46fc3eea --- /dev/null +++ b/typedapi/connector/syncjobupdatestats/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package syncjobupdatestats + +// Response holds the response body struct for the package syncjobupdatestats +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/sync_job_update_stats/SyncJobUpdateStatsResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/connector/syncjobupdatestats/sync_job_update_stats.go b/typedapi/connector/syncjobupdatestats/sync_job_update_stats.go new file mode 100644 index 0000000000..1937e58929 --- /dev/null +++ b/typedapi/connector/syncjobupdatestats/sync_job_update_stats.go @@ -0,0 +1,459 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Set the connector sync job stats. +// Stats include: `deleted_document_count`, `indexed_document_count`, +// `indexed_document_volume`, and `total_document_count`. 
+// You can also update `last_seen`. +// This API is mainly used by the connector service for updating sync job +// information. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +package syncjobupdatestats + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + connectorsyncjobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobUpdateStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorsyncjobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobUpdateStats type alias for index. +type NewSyncJobUpdateStats func(connectorsyncjobid string) *SyncJobUpdateStats + +// NewSyncJobUpdateStatsFunc returns a new instance of SyncJobUpdateStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSyncJobUpdateStatsFunc(tp elastictransport.Interface) NewSyncJobUpdateStats { + return func(connectorsyncjobid string) *SyncJobUpdateStats { + n := New(tp) + + n._connectorsyncjobid(connectorsyncjobid) + + return n + } +} + +// Set the connector sync job stats. +// Stats include: `deleted_document_count`, `indexed_document_count`, +// `indexed_document_volume`, and `total_document_count`. +// You can also update `last_seen`. 
+// This API is mainly used by the connector service for updating sync job +// information. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/set-connector-sync-job-stats-api.html +func New(tp elastictransport.Interface) *SyncJobUpdateStats { + r := &SyncJobUpdateStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SyncJobUpdateStats) Raw(raw io.Reader) *SyncJobUpdateStats { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SyncJobUpdateStats) Request(req *Request) *SyncJobUpdateStats { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *SyncJobUpdateStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SyncJobUpdateStats: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SyncJobUpdateStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_update_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_update_stats") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_update_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_update_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobUpdateStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjobupdatestats.Response +func (r SyncJobUpdateStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_update_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, 
err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SyncJobUpdateStats headers map. +func (r *SyncJobUpdateStats) Header(key, value string) *SyncJobUpdateStats { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier of the connector sync job. +// API Name: connectorsyncjobid +func (r *SyncJobUpdateStats) _connectorsyncjobid(connectorsyncjobid string) *SyncJobUpdateStats { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobUpdateStats) ErrorTrace(errortrace bool) *SyncJobUpdateStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path
+func (r *SyncJobUpdateStats) FilterPath(filterpaths ...string) *SyncJobUpdateStats {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *SyncJobUpdateStats) Human(human bool) *SyncJobUpdateStats {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *SyncJobUpdateStats) Pretty(pretty bool) *SyncJobUpdateStats {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The number of documents the sync job deleted.
+// API name: deleted_document_count
+func (r *SyncJobUpdateStats) DeletedDocumentCount(deleteddocumentcount int64) *SyncJobUpdateStats {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.DeletedDocumentCount = deleteddocumentcount
+
+	return r
+}
+
+// The number of documents the sync job indexed.
+// API name: indexed_document_count
+func (r *SyncJobUpdateStats) IndexedDocumentCount(indexeddocumentcount int64) *SyncJobUpdateStats {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.IndexedDocumentCount = indexeddocumentcount
+
+	return r
+}
+
+// The total size of the data (in MiB) the sync job indexed.
+// API name: indexed_document_volume +func (r *SyncJobUpdateStats) IndexedDocumentVolume(indexeddocumentvolume int64) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexedDocumentVolume = indexeddocumentvolume + + return r +} + +// The timestamp to use in the `last_seen` property for the connector sync job. +// API name: last_seen +func (r *SyncJobUpdateStats) LastSeen(duration types.DurationVariant) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastSeen = *duration.DurationCaster() + + return r +} + +// The connector-specific metadata. +// API name: metadata +func (r *SyncJobUpdateStats) Metadata(metadata types.MetadataVariant) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// The total number of documents in the target index after the sync job +// finished. +// API name: total_document_count +func (r *SyncJobUpdateStats) TotalDocumentCount(totaldocumentcount int) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TotalDocumentCount = &totaldocumentcount + + return r +} diff --git a/typedapi/connector/updateactivefiltering/response.go b/typedapi/connector/updateactivefiltering/response.go index 17742cd5ba..212174eb41 100644 --- a/typedapi/connector/updateactivefiltering/response.go +++ b/typedapi/connector/updateactivefiltering/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateactivefiltering @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updateactivefiltering // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_active_filtering/ConnectorUpdateActiveFilteringResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_active_filtering/ConnectorUpdateActiveFilteringResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updateactivefiltering/update_active_filtering.go b/typedapi/connector/updateactivefiltering/update_active_filtering.go index f2255e9f11..a97d8b41de 100644 --- a/typedapi/connector/updateactivefiltering/update_active_filtering.go +++ b/typedapi/connector/updateactivefiltering/update_active_filtering.go @@ -16,8 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Activate the connector draft filter. +// // Activates the valid draft filtering for a connector. package updateactivefiltering @@ -76,6 +78,8 @@ func NewUpdateActiveFilteringFunc(tp elastictransport.Interface) NewUpdateActive } } +// Activate the connector draft filter. +// // Activates the valid draft filtering for a connector. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-api.html diff --git a/typedapi/connector/updateapikeyid/request.go b/typedapi/connector/updateapikeyid/request.go index 0582692d70..de8fe40d6f 100644 --- a/typedapi/connector/updateapikeyid/request.go +++ b/typedapi/connector/updateapikeyid/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateapikeyid @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updateapikeyid // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_api_key_id/ConnectorUpdateAPIKeyIDRequest.ts#L21-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_api_key_id/ConnectorUpdateAPIKeyIDRequest.ts#L21-L53 type Request struct { ApiKeyId *string `json:"api_key_id,omitempty"` ApiKeySecretId *string `json:"api_key_secret_id,omitempty"` diff --git a/typedapi/connector/updateapikeyid/response.go b/typedapi/connector/updateapikeyid/response.go index 8c7c205666..12269d833d 100644 --- a/typedapi/connector/updateapikeyid/response.go +++ b/typedapi/connector/updateapikeyid/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateapikeyid @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updateapikeyid // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_api_key_id/ConnectorUpdateAPIKeyIDResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_api_key_id/ConnectorUpdateAPIKeyIDResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updateapikeyid/update_api_key_id.go b/typedapi/connector/updateapikeyid/update_api_key_id.go index 110a05168c..dda80213a4 100644 --- a/typedapi/connector/updateapikeyid/update_api_key_id.go +++ b/typedapi/connector/updateapikeyid/update_api_key_id.go @@ -16,9 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the API key id in the connector document +// Update the connector API key ID. +// +// Update the `api_key_id` and `api_key_secret_id` fields of a connector. +// You can specify the ID of the API key used for authorization and the ID of +// the connector secret where the API key is stored. +// The connector secret ID is required only for Elastic managed (native) +// connectors. +// Self-managed connectors (connector clients) do not use this field. 
package updateapikeyid import ( @@ -81,7 +88,14 @@ func NewUpdateApiKeyIdFunc(tp elastictransport.Interface) NewUpdateApiKeyId { } } -// Updates the API key id in the connector document +// Update the connector API key ID. +// +// Update the `api_key_id` and `api_key_secret_id` fields of a connector. +// You can specify the ID of the API key used for authorization and the ID of +// the connector secret where the API key is stored. +// The connector secret ID is required only for Elastic managed (native) +// connectors. +// Self-managed connectors (connector clients) do not use this field. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-api-key-id-api.html func New(tp elastictransport.Interface) *UpdateApiKeyId { @@ -91,8 +105,6 @@ func New(tp elastictransport.Interface) *UpdateApiKeyId { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -361,6 +373,10 @@ func (r *UpdateApiKeyId) Pretty(pretty bool) *UpdateApiKeyId { // API name: api_key_id func (r *UpdateApiKeyId) ApiKeyId(apikeyid string) *UpdateApiKeyId { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ApiKeyId = &apikeyid @@ -369,6 +385,10 @@ func (r *UpdateApiKeyId) ApiKeyId(apikeyid string) *UpdateApiKeyId { // API name: api_key_secret_id func (r *UpdateApiKeyId) ApiKeySecretId(apikeysecretid string) *UpdateApiKeyId { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ApiKeySecretId = &apikeysecretid diff --git a/typedapi/connector/updateconfiguration/request.go b/typedapi/connector/updateconfiguration/request.go index a543962897..ba6e6868c2 100644 --- a/typedapi/connector/updateconfiguration/request.go +++ b/typedapi/connector/updateconfiguration/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateconfiguration @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package updateconfiguration // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_configuration/ConnectorUpdateConfigurationRequest.ts#L25-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_configuration/ConnectorUpdateConfigurationRequest.ts#L25-L55 type Request struct { Configuration types.ConnectorConfiguration `json:"configuration,omitempty"` Values map[string]json.RawMessage `json:"values,omitempty"` diff --git a/typedapi/connector/updateconfiguration/response.go b/typedapi/connector/updateconfiguration/response.go index 7c84714c74..87e8f5c9fe 100644 --- a/typedapi/connector/updateconfiguration/response.go +++ b/typedapi/connector/updateconfiguration/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateconfiguration @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updateconfiguration // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_configuration/ConnectorUpdateConfigurationResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_configuration/ConnectorUpdateConfigurationResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updateconfiguration/update_configuration.go b/typedapi/connector/updateconfiguration/update_configuration.go index c1e27f338b..f57e6d6f23 100644 --- a/typedapi/connector/updateconfiguration/update_configuration.go +++ b/typedapi/connector/updateconfiguration/update_configuration.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the configuration field in the connector document +// Update the connector configuration. +// +// Update the configuration field in the connector document. package updateconfiguration import ( @@ -81,7 +83,9 @@ func NewUpdateConfigurationFunc(tp elastictransport.Interface) NewUpdateConfigur } } -// Updates the configuration field in the connector document +// Update the connector configuration. +// +// Update the configuration field in the connector document. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-configuration-api.html func New(tp elastictransport.Interface) *UpdateConfiguration { @@ -91,8 +95,6 @@ func New(tp elastictransport.Interface) *UpdateConfiguration { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -360,16 +362,42 @@ func (r *UpdateConfiguration) Pretty(pretty bool) *UpdateConfiguration { } // API name: configuration -func (r *UpdateConfiguration) Configuration(connectorconfiguration types.ConnectorConfiguration) *UpdateConfiguration { - r.req.Configuration = connectorconfiguration +func (r *UpdateConfiguration) Configuration(connectorconfiguration types.ConnectorConfigurationVariant) *UpdateConfiguration { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Configuration = *connectorconfiguration.ConnectorConfigurationCaster() return r } // API name: values func (r *UpdateConfiguration) Values(values map[string]json.RawMessage) *UpdateConfiguration { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Values = values + return r +} + +func (r *UpdateConfiguration) AddValue(key string, value json.RawMessage) *UpdateConfiguration { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Values == nil { + r.req.Values = make(map[string]json.RawMessage) + } else { + tmp = r.req.Values + } + + tmp[key] = value + r.req.Values = tmp return r } diff --git a/typedapi/connector/updateerror/request.go b/typedapi/connector/updateerror/request.go index af714bc66b..bcaa6284a6 100644 --- a/typedapi/connector/updateerror/request.go +++ b/typedapi/connector/updateerror/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateerror @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package updateerror // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_error/ConnectorUpdateErrorRequest.ts#L23-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_error/ConnectorUpdateErrorRequest.ts#L23-L54 type Request struct { Error any `json:"error"` } diff --git a/typedapi/connector/updateerror/response.go b/typedapi/connector/updateerror/response.go index 8c05e9c1ec..af6839ea96 100644 --- a/typedapi/connector/updateerror/response.go +++ b/typedapi/connector/updateerror/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateerror @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updateerror // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_error/ConnectorUpdateErrorResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_error/ConnectorUpdateErrorResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updateerror/update_error.go b/typedapi/connector/updateerror/update_error.go index 481de3ade8..63baa1c5e5 100644 --- a/typedapi/connector/updateerror/update_error.go +++ b/typedapi/connector/updateerror/update_error.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the filtering field in the connector document +// Update the connector error field. +// +// Set the error field for the connector. +// If the error provided in the request body is non-null, the connector’s status +// is updated to error. +// Otherwise, if the error is reset to null, the connector status is updated to +// connected. package updateerror import ( @@ -81,7 +87,13 @@ func NewUpdateErrorFunc(tp elastictransport.Interface) NewUpdateError { } } -// Updates the filtering field in the connector document +// Update the connector error field. +// +// Set the error field for the connector. 
+// If the error provided in the request body is non-null, the connector’s status +// is updated to error. +// Otherwise, if the error is reset to null, the connector status is updated to +// connected. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-error-api.html func New(tp elastictransport.Interface) *UpdateError { @@ -91,8 +103,6 @@ func New(tp elastictransport.Interface) *UpdateError { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -361,6 +371,11 @@ func (r *UpdateError) Pretty(pretty bool) *UpdateError { // API name: error func (r *UpdateError) Error(error any) *UpdateError { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Error = error return r diff --git a/typedapi/connector/updatefeatures/request.go b/typedapi/connector/updatefeatures/request.go new file mode 100644 index 0000000000..7952a08f18 --- /dev/null +++ b/typedapi/connector/updatefeatures/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package updatefeatures + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package updatefeatures +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_features/ConnectorUpdateFeaturesRequest.ts#L23-L61 +type Request struct { + Features types.ConnectorFeatures `json:"features"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatefeatures request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/types/synonymsupdateresult.go b/typedapi/connector/updatefeatures/response.go similarity index 57% rename from typedapi/types/synonymsupdateresult.go rename to typedapi/connector/updatefeatures/response.go index 71753dbaf8..24c8b319c2 100644 --- a/typedapi/types/synonymsupdateresult.go +++ b/typedapi/connector/updatefeatures/response.go @@ -16,28 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -package types +package updatefeatures import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result" ) -// SynonymsUpdateResult type. 
+// Response holds the response body struct for the package updatefeatures // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/synonyms/_types/SynonymsUpdateResult.ts#L23-L34 -type SynonymsUpdateResult struct { - // ReloadAnalyzersDetails Updating synonyms in a synonym set reloads the associated analyzers. - // This is the analyzers reloading result - ReloadAnalyzersDetails ReloadResult `json:"reload_analyzers_details"` - // Result Update operation result +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_features/ConnectorUpdateFeaturesResponse.ts#L22-L26 +type Response struct { Result result.Result `json:"result"` } -// NewSynonymsUpdateResult returns a SynonymsUpdateResult. -func NewSynonymsUpdateResult() *SynonymsUpdateResult { - r := &SynonymsUpdateResult{} - +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} return r } diff --git a/typedapi/connector/updatefeatures/update_features.go b/typedapi/connector/updatefeatures/update_features.go new file mode 100644 index 0000000000..2dea08e0d6 --- /dev/null +++ b/typedapi/connector/updatefeatures/update_features.go @@ -0,0 +1,400 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Update the connector features. +// Update the connector features in the connector document. +// This API can be used to control the following aspects of a connector: +// +// * document-level security +// * incremental syncs +// * advanced sync rules +// * basic sync rules +// +// Normally, the running connector service automatically manages these features. +// However, you can use this API to override the default behavior. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +package updatefeatures + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateFeatures struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateFeatures type alias for index. +type NewUpdateFeatures func(connectorid string) *UpdateFeatures + +// NewUpdateFeaturesFunc returns a new instance of UpdateFeatures with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateFeaturesFunc(tp elastictransport.Interface) NewUpdateFeatures { + return func(connectorid string) *UpdateFeatures { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector features. +// Update the connector features in the connector document. +// This API can be used to control the following aspects of a connector: +// +// * document-level security +// * incremental syncs +// * advanced sync rules +// * basic sync rules +// +// Normally, the running connector service automatically manages these features. +// However, you can use this API to override the default behavior. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-features-api.html +func New(tp elastictransport.Interface) *UpdateFeatures { + r := &UpdateFeatures{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateFeatures) Raw(raw io.Reader) *UpdateFeatures { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateFeatures) Request(req *Request) *UpdateFeatures { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *UpdateFeatures) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateFeatures: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_features") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateFeatures) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_features") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_features") + if reader := instrument.RecordRequestBody(ctx, "connector.update_features", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_features") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateFeatures query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatefeatures.Response +func (r UpdateFeatures) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_features") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer 
res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateFeatures headers map. +func (r *UpdateFeatures) Header(key, value string) *UpdateFeatures { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated. +// API Name: connectorid +func (r *UpdateFeatures) _connectorid(connectorid string) *UpdateFeatures { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateFeatures) ErrorTrace(errortrace bool) *UpdateFeatures { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *UpdateFeatures) FilterPath(filterpaths ...string) *UpdateFeatures { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateFeatures) Human(human bool) *UpdateFeatures { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateFeatures) Pretty(pretty bool) *UpdateFeatures { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: features +func (r *UpdateFeatures) Features(features types.ConnectorFeaturesVariant) *UpdateFeatures { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Features = *features.ConnectorFeaturesCaster() + + return r +} diff --git a/typedapi/connector/updatefiltering/request.go b/typedapi/connector/updatefiltering/request.go index af5a206bfc..a74ae5101d 100644 --- a/typedapi/connector/updatefiltering/request.go +++ b/typedapi/connector/updatefiltering/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatefiltering @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatefiltering // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_filtering/ConnectorUpdateFilteringRequest.ts#L27-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_filtering/ConnectorUpdateFilteringRequest.ts#L27-L60 type Request struct { AdvancedSnippet *types.FilteringAdvancedSnippet `json:"advanced_snippet,omitempty"` Filtering []types.FilteringConfig `json:"filtering,omitempty"` diff --git a/typedapi/connector/updatefiltering/response.go b/typedapi/connector/updatefiltering/response.go index 140344fca4..4c75bed907 100644 --- a/typedapi/connector/updatefiltering/response.go +++ b/typedapi/connector/updatefiltering/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatefiltering @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatefiltering // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_filtering/ConnectorUpdateFilteringResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_filtering/ConnectorUpdateFilteringResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatefiltering/update_filtering.go b/typedapi/connector/updatefiltering/update_filtering.go index 253449c7f5..d287f2046c 100644 --- a/typedapi/connector/updatefiltering/update_filtering.go +++ b/typedapi/connector/updatefiltering/update_filtering.go @@ -16,9 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the filtering field in the connector document +// Update the connector filtering. +// +// Update the draft filtering configuration of a connector and marks the draft +// validation state as edited. +// The filtering draft is activated once validated by the running Elastic +// connector service. +// The filtering property is used to configure sync rules (both basic and +// advanced) for a connector. 
package updatefiltering import ( @@ -81,7 +88,14 @@ func NewUpdateFilteringFunc(tp elastictransport.Interface) NewUpdateFiltering { } } -// Updates the filtering field in the connector document +// Update the connector filtering. +// +// Update the draft filtering configuration of a connector and marks the draft +// validation state as edited. +// The filtering draft is activated once validated by the running Elastic +// connector service. +// The filtering property is used to configure sync rules (both basic and +// advanced) for a connector. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-api.html func New(tp elastictransport.Interface) *UpdateFiltering { @@ -91,8 +105,6 @@ func New(tp elastictransport.Interface) *UpdateFiltering { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -360,23 +372,41 @@ func (r *UpdateFiltering) Pretty(pretty bool) *UpdateFiltering { } // API name: advanced_snippet -func (r *UpdateFiltering) AdvancedSnippet(advancedsnippet *types.FilteringAdvancedSnippet) *UpdateFiltering { +func (r *UpdateFiltering) AdvancedSnippet(advancedsnippet types.FilteringAdvancedSnippetVariant) *UpdateFiltering { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AdvancedSnippet = advancedsnippet + r.req.AdvancedSnippet = advancedsnippet.FilteringAdvancedSnippetCaster() return r } // API name: filtering -func (r *UpdateFiltering) Filtering(filterings ...types.FilteringConfig) *UpdateFiltering { - r.req.Filtering = filterings +func (r *UpdateFiltering) Filtering(filterings ...types.FilteringConfigVariant) *UpdateFiltering { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range filterings { + r.req.Filtering = append(r.req.Filtering, *v.FilteringConfigCaster()) + + } 
return r } // API name: rules -func (r *UpdateFiltering) Rules(rules ...types.FilteringRule) *UpdateFiltering { - r.req.Rules = rules +func (r *UpdateFiltering) Rules(rules ...types.FilteringRuleVariant) *UpdateFiltering { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range rules { + r.req.Rules = append(r.req.Rules, *v.FilteringRuleCaster()) + + } return r } diff --git a/typedapi/connector/updatefilteringvalidation/request.go b/typedapi/connector/updatefilteringvalidation/request.go index b2f818b003..e283455663 100644 --- a/typedapi/connector/updatefilteringvalidation/request.go +++ b/typedapi/connector/updatefilteringvalidation/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatefilteringvalidation @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatefilteringvalidation // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_filtering_validation/ConnectorUpdateFilteringValidationRequest.ts#L23-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_filtering_validation/ConnectorUpdateFilteringValidationRequest.ts#L23-L48 type Request struct { Validation types.FilteringRulesValidation `json:"validation"` } diff --git a/typedapi/connector/updatefilteringvalidation/response.go b/typedapi/connector/updatefilteringvalidation/response.go index ea3eab03f3..47952de341 100644 --- a/typedapi/connector/updatefilteringvalidation/response.go +++ b/typedapi/connector/updatefilteringvalidation/response.go @@ -16,7 
+16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatefilteringvalidation @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatefilteringvalidation // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_filtering_validation/ConnectorUpdateFilteringValidationResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_filtering_validation/ConnectorUpdateFilteringValidationResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatefilteringvalidation/update_filtering_validation.go b/typedapi/connector/updatefilteringvalidation/update_filtering_validation.go index a55b4343e1..37bd5dfed3 100644 --- a/typedapi/connector/updatefilteringvalidation/update_filtering_validation.go +++ b/typedapi/connector/updatefilteringvalidation/update_filtering_validation.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the draft filtering validation info for a connector. +// Update the connector draft filtering validation. +// +// Update the draft filtering validation info for a connector. 
package updatefilteringvalidation import ( @@ -81,7 +83,9 @@ func NewUpdateFilteringValidationFunc(tp elastictransport.Interface) NewUpdateFi } } -// Updates the draft filtering validation info for a connector. +// Update the connector draft filtering validation. +// +// Update the draft filtering validation info for a connector. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-validation-api.html func New(tp elastictransport.Interface) *UpdateFilteringValidation { @@ -91,8 +95,6 @@ func New(tp elastictransport.Interface) *UpdateFilteringValidation { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -362,9 +364,13 @@ func (r *UpdateFilteringValidation) Pretty(pretty bool) *UpdateFilteringValidati } // API name: validation -func (r *UpdateFilteringValidation) Validation(validation *types.FilteringRulesValidation) *UpdateFilteringValidation { +func (r *UpdateFilteringValidation) Validation(validation types.FilteringRulesValidationVariant) *UpdateFilteringValidation { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Validation = *validation + r.req.Validation = *validation.FilteringRulesValidationCaster() return r } diff --git a/typedapi/connector/updateindexname/request.go b/typedapi/connector/updateindexname/request.go index f31d86c4a1..29e4884c92 100644 --- a/typedapi/connector/updateindexname/request.go +++ b/typedapi/connector/updateindexname/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateindexname @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package updateindexname // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_index_name/ConnectorUpdateIndexNameRequest.ts#L23-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_index_name/ConnectorUpdateIndexNameRequest.ts#L23-L51 type Request struct { IndexName any `json:"index_name"` } diff --git a/typedapi/connector/updateindexname/response.go b/typedapi/connector/updateindexname/response.go index 7c71932a44..f34b45b9da 100644 --- a/typedapi/connector/updateindexname/response.go +++ b/typedapi/connector/updateindexname/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateindexname @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updateindexname // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_index_name/ConnectorUpdateIndexNameResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_index_name/ConnectorUpdateIndexNameResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updateindexname/update_index_name.go b/typedapi/connector/updateindexname/update_index_name.go index 4c43bfaeee..24f59d8ca8 100644 --- a/typedapi/connector/updateindexname/update_index_name.go +++ b/typedapi/connector/updateindexname/update_index_name.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the index_name in the connector document +// Update the connector index name. +// +// Update the `index_name` field of a connector, specifying the index where the +// data ingested by the connector is stored. package updateindexname import ( @@ -81,7 +84,10 @@ func NewUpdateIndexNameFunc(tp elastictransport.Interface) NewUpdateIndexName { } } -// Updates the index_name in the connector document +// Update the connector index name. +// +// Update the `index_name` field of a connector, specifying the index where the +// data ingested by the connector is stored. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-index-name-api.html func New(tp elastictransport.Interface) *UpdateIndexName { @@ -91,8 +97,6 @@ func New(tp elastictransport.Interface) *UpdateIndexName { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -361,6 +365,11 @@ func (r *UpdateIndexName) Pretty(pretty bool) *UpdateIndexName { // API name: index_name func (r *UpdateIndexName) IndexName(indexname any) *UpdateIndexName { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexName = indexname return r diff --git a/typedapi/connector/updatename/request.go b/typedapi/connector/updatename/request.go index 4fea05762d..e14f987d07 100644 --- a/typedapi/connector/updatename/request.go +++ b/typedapi/connector/updatename/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatename @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updatename // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_name/ConnectorUpdateNameRequest.ts#L22-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_name/ConnectorUpdateNameRequest.ts#L22-L49 type Request struct { Description *string `json:"description,omitempty"` Name *string `json:"name,omitempty"` diff --git a/typedapi/connector/updatename/response.go b/typedapi/connector/updatename/response.go index 501959879b..a1ccafb192 100644 --- a/typedapi/connector/updatename/response.go +++ b/typedapi/connector/updatename/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatename @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatename // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_name/ConnectorUpdateNameResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_name/ConnectorUpdateNameResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatename/update_name.go b/typedapi/connector/updatename/update_name.go index 22b1ae546c..a0ab6615d5 100644 --- a/typedapi/connector/updatename/update_name.go +++ b/typedapi/connector/updatename/update_name.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the name and description fields in the connector document +// Update the connector name and description. package updatename import ( @@ -81,7 +81,7 @@ func NewUpdateNameFunc(tp elastictransport.Interface) NewUpdateName { } } -// Updates the name and description fields in the connector document +// Update the connector name and description. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-name-description-api.html func New(tp elastictransport.Interface) *UpdateName { @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *UpdateName { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -361,6 +359,10 @@ func (r *UpdateName) Pretty(pretty bool) *UpdateName { // API name: description func (r *UpdateName) Description(description string) *UpdateName { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description @@ -369,6 +371,10 @@ func (r *UpdateName) Description(description string) *UpdateName { // API name: name func (r *UpdateName) Name(name string) *UpdateName { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Name = &name diff --git a/typedapi/connector/updatenative/request.go b/typedapi/connector/updatenative/request.go index 4145ded58a..d2b4805d2f 100644 --- a/typedapi/connector/updatenative/request.go +++ b/typedapi/connector/updatenative/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatenative @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updatenative // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_native/ConnectorUpdateNativeRequest.ts#L22-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_native/ConnectorUpdateNativeRequest.ts#L22-L48 type Request struct { IsNative bool `json:"is_native"` } diff --git a/typedapi/connector/updatenative/response.go b/typedapi/connector/updatenative/response.go index daf94cff31..c526063f3c 100644 --- a/typedapi/connector/updatenative/response.go +++ b/typedapi/connector/updatenative/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatenative @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatenative // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_native/ConnectorUpdateNativeResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_native/ConnectorUpdateNativeResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatenative/update_native.go b/typedapi/connector/updatenative/update_native.go index 387ad23999..841097cc48 100644 --- a/typedapi/connector/updatenative/update_native.go +++ b/typedapi/connector/updatenative/update_native.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the is_native flag in the connector document +// Update the connector is_native flag. package updatenative import ( @@ -81,7 +81,7 @@ func NewUpdateNativeFunc(tp elastictransport.Interface) NewUpdateNative { } } -// Updates the is_native flag in the connector document +// Update the connector is_native flag. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-native-api.html func New(tp elastictransport.Interface) *UpdateNative { @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *UpdateNative { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -361,6 +359,11 @@ func (r *UpdateNative) Pretty(pretty bool) *UpdateNative { // API name: is_native func (r *UpdateNative) IsNative(isnative bool) *UpdateNative { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IsNative = isnative return r diff --git a/typedapi/connector/updatepipeline/request.go b/typedapi/connector/updatepipeline/request.go index 16b26bf1ed..93f3ed51bf 100644 --- a/typedapi/connector/updatepipeline/request.go +++ b/typedapi/connector/updatepipeline/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatepipeline @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatepipeline // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_pipeline/ConnectorUpdatePipelineRequest.ts#L23-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_pipeline/ConnectorUpdatePipelineRequest.ts#L23-L52 type Request struct { Pipeline types.IngestPipelineParams `json:"pipeline"` } diff --git a/typedapi/connector/updatepipeline/response.go b/typedapi/connector/updatepipeline/response.go index 2a4c3e1640..63b3758fb4 100644 --- a/typedapi/connector/updatepipeline/response.go +++ b/typedapi/connector/updatepipeline/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatepipeline @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatepipeline // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_pipeline/ConnectorUpdatePipelineResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_pipeline/ConnectorUpdatePipelineResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatepipeline/update_pipeline.go b/typedapi/connector/updatepipeline/update_pipeline.go index 1ff410579f..d4190f4a0a 100644 --- a/typedapi/connector/updatepipeline/update_pipeline.go +++ b/typedapi/connector/updatepipeline/update_pipeline.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the pipeline field in the connector document +// Update the connector pipeline. +// +// When you create a new connector, the configuration of an ingest pipeline is +// populated with default settings. package updatepipeline import ( @@ -81,7 +84,10 @@ func NewUpdatePipelineFunc(tp elastictransport.Interface) NewUpdatePipeline { } } -// Updates the pipeline field in the connector document +// Update the connector pipeline. +// +// When you create a new connector, the configuration of an ingest pipeline is +// populated with default settings. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-pipeline-api.html func New(tp elastictransport.Interface) *UpdatePipeline { @@ -91,8 +97,6 @@ func New(tp elastictransport.Interface) *UpdatePipeline { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -360,9 +364,13 @@ func (r *UpdatePipeline) Pretty(pretty bool) *UpdatePipeline { } // API name: pipeline -func (r *UpdatePipeline) Pipeline(pipeline *types.IngestPipelineParams) *UpdatePipeline { +func (r *UpdatePipeline) Pipeline(pipeline types.IngestPipelineParamsVariant) *UpdatePipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pipeline = *pipeline + r.req.Pipeline = *pipeline.IngestPipelineParamsCaster() return r } diff --git a/typedapi/connector/updatescheduling/request.go b/typedapi/connector/updatescheduling/request.go index a9eff95329..dfe99f50fd 100644 --- a/typedapi/connector/updatescheduling/request.go +++ b/typedapi/connector/updatescheduling/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatescheduling @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatescheduling // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_scheduling/ConnectorUpdateSchedulingRequest.ts#L23-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_scheduling/ConnectorUpdateSchedulingRequest.ts#L23-L50 type Request struct { Scheduling types.SchedulingConfiguration `json:"scheduling"` } diff --git a/typedapi/connector/updatescheduling/response.go b/typedapi/connector/updatescheduling/response.go index 2643849d4f..a44d41d91c 100644 --- a/typedapi/connector/updatescheduling/response.go +++ b/typedapi/connector/updatescheduling/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatescheduling @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatescheduling // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_scheduling/ConnectorUpdateSchedulingResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_scheduling/ConnectorUpdateSchedulingResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatescheduling/update_scheduling.go b/typedapi/connector/updatescheduling/update_scheduling.go index 7ccc4dbd58..d13f012807 100644 --- a/typedapi/connector/updatescheduling/update_scheduling.go +++ b/typedapi/connector/updatescheduling/update_scheduling.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the scheduling field in the connector document +// Update the connector scheduling. package updatescheduling import ( @@ -81,7 +81,7 @@ func NewUpdateSchedulingFunc(tp elastictransport.Interface) NewUpdateScheduling } } -// Updates the scheduling field in the connector document +// Update the connector scheduling. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-scheduling-api.html func New(tp elastictransport.Interface) *UpdateScheduling { @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *UpdateScheduling { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -360,9 +358,13 @@ func (r *UpdateScheduling) Pretty(pretty bool) *UpdateScheduling { } // API name: scheduling -func (r *UpdateScheduling) Scheduling(scheduling *types.SchedulingConfiguration) *UpdateScheduling { +func (r *UpdateScheduling) Scheduling(scheduling types.SchedulingConfigurationVariant) *UpdateScheduling { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Scheduling = *scheduling + r.req.Scheduling = *scheduling.SchedulingConfigurationCaster() return r } diff --git a/typedapi/connector/updateservicetype/request.go b/typedapi/connector/updateservicetype/request.go index 9af84bb339..3bfd568298 100644 --- a/typedapi/connector/updateservicetype/request.go +++ b/typedapi/connector/updateservicetype/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateservicetype @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updateservicetype // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_service_type/ConnectorUpdateServiceTypeRequest.ts#L22-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_service_type/ConnectorUpdateServiceTypeRequest.ts#L22-L48 type Request struct { ServiceType string `json:"service_type"` } diff --git a/typedapi/connector/updateservicetype/response.go b/typedapi/connector/updateservicetype/response.go index fbbca850ff..5ec5a767f4 100644 --- a/typedapi/connector/updateservicetype/response.go +++ b/typedapi/connector/updateservicetype/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateservicetype @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updateservicetype // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_service_type/ConnectorUpdateServiceTypeResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_service_type/ConnectorUpdateServiceTypeResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updateservicetype/update_service_type.go b/typedapi/connector/updateservicetype/update_service_type.go index 84412adb57..b9ddb8a312 100644 --- a/typedapi/connector/updateservicetype/update_service_type.go +++ b/typedapi/connector/updateservicetype/update_service_type.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the service type of the connector +// Update the connector service type. package updateservicetype import ( @@ -81,7 +81,7 @@ func NewUpdateServiceTypeFunc(tp elastictransport.Interface) NewUpdateServiceTyp } } -// Updates the service type of the connector +// Update the connector service type. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-service-type-api.html func New(tp elastictransport.Interface) *UpdateServiceType { @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *UpdateServiceType { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -361,6 +359,10 @@ func (r *UpdateServiceType) Pretty(pretty bool) *UpdateServiceType { // API name: service_type func (r *UpdateServiceType) ServiceType(servicetype string) *UpdateServiceType { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ServiceType = servicetype diff --git a/typedapi/connector/updatestatus/request.go b/typedapi/connector/updatestatus/request.go index dceddf6680..5049914ffe 100644 --- a/typedapi/connector/updatestatus/request.go +++ b/typedapi/connector/updatestatus/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatestatus @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatestatus // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_status/ConnectorUpdateStatusRequest.ts#L23-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_status/ConnectorUpdateStatusRequest.ts#L23-L49 type Request struct { Status connectorstatus.ConnectorStatus `json:"status"` } diff --git a/typedapi/connector/updatestatus/response.go b/typedapi/connector/updatestatus/response.go index 172f1f9bf9..b46a59660e 100644 --- a/typedapi/connector/updatestatus/response.go +++ b/typedapi/connector/updatestatus/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatestatus @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatestatus // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/update_status/ConnectorUpdateStatusResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/update_status/ConnectorUpdateStatusResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/connector/updatestatus/update_status.go b/typedapi/connector/updatestatus/update_status.go index 516aad0bf2..34a453ae09 100644 --- a/typedapi/connector/updatestatus/update_status.go +++ b/typedapi/connector/updatestatus/update_status.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the status of the connector +// Update the connector status. package updatestatus import ( @@ -82,7 +82,7 @@ func NewUpdateStatusFunc(tp elastictransport.Interface) NewUpdateStatus { } } -// Updates the status of the connector +// Update the connector status. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-status-api.html func New(tp elastictransport.Interface) *UpdateStatus { @@ -92,8 +92,6 @@ func New(tp elastictransport.Interface) *UpdateStatus { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -362,7 +360,10 @@ func (r *UpdateStatus) Pretty(pretty bool) *UpdateStatus { // API name: status func (r *UpdateStatus) Status(status connectorstatus.ConnectorStatus) *UpdateStatus { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Status = status - return r } diff --git a/typedapi/core/bulk/bulk.go b/typedapi/core/bulk/bulk.go index 788e8482ac..eafe908b03 100644 --- a/typedapi/core/bulk/bulk.go +++ b/typedapi/core/bulk/bulk.go @@ -16,11 +16,165 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Bulk index or delete documents. -// Performs multiple indexing or delete operations in a single API call. +// Perform multiple `index`, `create`, `delete`, and `update` actions in a +// single request. // This reduces overhead and can greatly increase indexing speed. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To use the `create` action, you must have the `create_doc`, `create`, +// `index`, or `write` index privilege. Data streams support only the `create` +// action. +// * To use the `index` action, you must have the `create`, `index`, or `write` +// index privilege. 
+// * To use the `delete` action, you must have the `delete` or `write` index +// privilege. +// * To use the `update` action, you must have the `index` or `write` index +// privilege. +// * To automatically create a data stream or index with a bulk API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// * To make the result of a bulk operation visible to search using the +// `refresh` parameter, you must have the `maintenance` or `manage` index +// privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// The actions are specified in the request body using a newline delimited JSON +// (NDJSON) structure: +// +// ``` +// action_and_meta_data\n +// optional_source\n +// action_and_meta_data\n +// optional_source\n +// .... +// action_and_meta_data\n +// optional_source\n +// ``` +// +// The `index` and `create` actions expect a source on the next line and have +// the same semantics as the `op_type` parameter in the standard index API. +// A `create` action fails if a document with the same ID already exists in the +// target +// An `index` action adds or replaces a document as necessary. +// +// NOTE: Data streams support only the `create` action. +// To update or delete a document in a data stream, you must target the backing +// index containing the document. +// +// An `update` action expects that the partial doc, upsert, and script and its +// options are specified on the next line. +// +// A `delete` action does not expect a source on the next line and has the same +// semantics as the standard delete API. +// +// NOTE: The final line of data must end with a newline character (`\n`). +// Each newline character may be preceded by a carriage return (`\r`). +// When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header +// of `application/json` or `application/x-ndjson`. 
+// Because this format uses literal newline characters (`\n`) as delimiters, +// make sure that the JSON actions and sources are not pretty printed. +// +// If you provide a target in the request path, it is used for any actions that +// don't explicitly specify an `_index` argument. +// +// A note on the format: the idea here is to make processing as fast as +// possible. +// As some of the actions are redirected to other shards on other nodes, only +// `action_meta_data` is parsed on the receiving node side. +// +// Client libraries using this protocol should try and strive to do something +// similar on the client side, and reduce buffering as much as possible. +// +// There is no "correct" number of actions to perform in a single bulk request. +// Experiment with different settings to find the optimal size for your +// particular workload. +// Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by +// default so clients must ensure that no request exceeds this size. +// It is not possible to index a single document that exceeds the size limit, so +// you must pre-process any such documents into smaller pieces before sending +// them to Elasticsearch. +// For instance, split documents into pages or chapters before indexing them, or +// store raw binary data in a system outside Elasticsearch and replace the raw +// data with a link to the external system in the documents that you send to +// Elasticsearch. +// +// **Client suppport for bulk requests** +// +// Some of the officially supported clients provide helpers to assist with bulk +// requests and reindexing: +// +// * Go: Check out `esutil.BulkIndexer` +// * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and +// `Search::Elasticsearch::Client::5_0::Scroll` +// * Python: Check out `elasticsearch.helpers.*` +// * JavaScript: Check out `client.helpers.*` +// * .NET: Check out `BulkAllObservable` +// * PHP: Check out bulk indexing. 
+// +// **Submitting bulk requests with cURL** +// +// If you're providing text file input to `curl`, you must use the +// `--data-binary` flag instead of plain `-d`. +// The latter doesn't preserve newlines. For example: +// +// ``` +// $ cat requests +// { "index" : { "_index" : "test", "_id" : "1" } } +// { "field1" : "value1" } +// $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk +// --data-binary "@requests"; echo +// {"took":7, "errors": false, +// "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} +// ``` +// +// **Optimistic concurrency control** +// +// Each `index` and `delete` action within a bulk API call may include the +// `if_seq_no` and `if_primary_term` parameters in their respective action and +// meta data lines. +// The `if_seq_no` and `if_primary_term` parameters control how operations are +// run, based on the last modification to existing documents. See Optimistic +// concurrency control for more details. +// +// **Versioning** +// +// Each bulk item can include the version value using the `version` field. +// It automatically follows the behavior of the index or delete operation based +// on the `_version` mapping. +// It also support the `version_type`. +// +// **Routing** +// +// Each bulk item can include the routing value using the `routing` field. +// It automatically follows the behavior of the index or delete operation based +// on the `_routing` mapping. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Wait for active shards** +// +// When making bulk calls, you can set the `wait_for_active_shards` parameter to +// require a minimum number of shard copies to be active before starting to +// process the bulk request. +// +// **Refresh** +// +// Control when the changes made by this request are visible to search. 
+// +// NOTE: Only the shards that receive the bulk request will be affected by +// refresh. +// Imagine a `_bulk?refresh=wait_for` request with three documents in it that +// happen to be routed to different shards in an index with five shards. +// The request will only wait for those three shards to refresh. +// The other two shards that make up the index do not participate in the `_bulk` +// request at all. package bulk import ( @@ -83,9 +237,163 @@ func NewBulkFunc(tp elastictransport.Interface) NewBulk { } // Bulk index or delete documents. -// Performs multiple indexing or delete operations in a single API call. +// Perform multiple `index`, `create`, `delete`, and `update` actions in a +// single request. // This reduces overhead and can greatly increase indexing speed. // +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To use the `create` action, you must have the `create_doc`, `create`, +// `index`, or `write` index privilege. Data streams support only the `create` +// action. +// * To use the `index` action, you must have the `create`, `index`, or `write` +// index privilege. +// * To use the `delete` action, you must have the `delete` or `write` index +// privilege. +// * To use the `update` action, you must have the `index` or `write` index +// privilege. +// * To automatically create a data stream or index with a bulk API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// * To make the result of a bulk operation visible to search using the +// `refresh` parameter, you must have the `maintenance` or `manage` index +// privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. 
+// +// The actions are specified in the request body using a newline delimited JSON +// (NDJSON) structure: +// +// ``` +// action_and_meta_data\n +// optional_source\n +// action_and_meta_data\n +// optional_source\n +// .... +// action_and_meta_data\n +// optional_source\n +// ``` +// +// The `index` and `create` actions expect a source on the next line and have +// the same semantics as the `op_type` parameter in the standard index API. +// A `create` action fails if a document with the same ID already exists in the +// target +// An `index` action adds or replaces a document as necessary. +// +// NOTE: Data streams support only the `create` action. +// To update or delete a document in a data stream, you must target the backing +// index containing the document. +// +// An `update` action expects that the partial doc, upsert, and script and its +// options are specified on the next line. +// +// A `delete` action does not expect a source on the next line and has the same +// semantics as the standard delete API. +// +// NOTE: The final line of data must end with a newline character (`\n`). +// Each newline character may be preceded by a carriage return (`\r`). +// When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header +// of `application/json` or `application/x-ndjson`. +// Because this format uses literal newline characters (`\n`) as delimiters, +// make sure that the JSON actions and sources are not pretty printed. +// +// If you provide a target in the request path, it is used for any actions that +// don't explicitly specify an `_index` argument. +// +// A note on the format: the idea here is to make processing as fast as +// possible. +// As some of the actions are redirected to other shards on other nodes, only +// `action_meta_data` is parsed on the receiving node side. +// +// Client libraries using this protocol should try and strive to do something +// similar on the client side, and reduce buffering as much as possible. 
+// +// There is no "correct" number of actions to perform in a single bulk request. +// Experiment with different settings to find the optimal size for your +// particular workload. +// Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by +// default so clients must ensure that no request exceeds this size. +// It is not possible to index a single document that exceeds the size limit, so +// you must pre-process any such documents into smaller pieces before sending +// them to Elasticsearch. +// For instance, split documents into pages or chapters before indexing them, or +// store raw binary data in a system outside Elasticsearch and replace the raw +// data with a link to the external system in the documents that you send to +// Elasticsearch. +// +// **Client suppport for bulk requests** +// +// Some of the officially supported clients provide helpers to assist with bulk +// requests and reindexing: +// +// * Go: Check out `esutil.BulkIndexer` +// * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and +// `Search::Elasticsearch::Client::5_0::Scroll` +// * Python: Check out `elasticsearch.helpers.*` +// * JavaScript: Check out `client.helpers.*` +// * .NET: Check out `BulkAllObservable` +// * PHP: Check out bulk indexing. +// +// **Submitting bulk requests with cURL** +// +// If you're providing text file input to `curl`, you must use the +// `--data-binary` flag instead of plain `-d`. +// The latter doesn't preserve newlines. 
For example: +// +// ``` +// $ cat requests +// { "index" : { "_index" : "test", "_id" : "1" } } +// { "field1" : "value1" } +// $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk +// --data-binary "@requests"; echo +// {"took":7, "errors": false, +// "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} +// ``` +// +// **Optimistic concurrency control** +// +// Each `index` and `delete` action within a bulk API call may include the +// `if_seq_no` and `if_primary_term` parameters in their respective action and +// meta data lines. +// The `if_seq_no` and `if_primary_term` parameters control how operations are +// run, based on the last modification to existing documents. See Optimistic +// concurrency control for more details. +// +// **Versioning** +// +// Each bulk item can include the version value using the `version` field. +// It automatically follows the behavior of the index or delete operation based +// on the `_version` mapping. +// It also support the `version_type`. +// +// **Routing** +// +// Each bulk item can include the routing value using the `routing` field. +// It automatically follows the behavior of the index or delete operation based +// on the `_routing` mapping. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Wait for active shards** +// +// When making bulk calls, you can set the `wait_for_active_shards` parameter to +// require a minimum number of shard copies to be active before starting to +// process the bulk request. +// +// **Refresh** +// +// Control when the changes made by this request are visible to search. +// +// NOTE: Only the shards that receive the bulk request will be affected by +// refresh. 
+// Imagine a `_bulk?refresh=wait_for` request with three documents in it that +// happen to be routed to different shards in an index with five shards. +// The request will only wait for those three shards to refresh. +// The other two shards that make up the index do not participate in the `_bulk` +// request at all. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html func New(tp elastictransport.Interface) *Bulk { r := &Bulk{ @@ -315,7 +623,8 @@ func (r *Bulk) Header(key, value string) *Bulk { return r } -// Index Name of the data stream, index, or index alias to perform bulk actions on. +// Index The name of the data stream, index, or index alias to perform bulk actions +// on. // API Name: index func (r *Bulk) Index(index string) *Bulk { r.paramSet |= indexMask @@ -324,10 +633,28 @@ func (r *Bulk) Index(index string) *Bulk { return r } -// Pipeline ID of the pipeline to use to preprocess incoming documents. -// If the index has a default ingest pipeline specified, then setting the value -// to `_none` disables the default ingest pipeline for this request. -// If a final pipeline is configured it will always run, regardless of the value +// IncludeSourceOnError True or false if to include the document source in the error message in case +// of parsing errors. +// API name: include_source_on_error +func (r *Bulk) IncludeSourceOnError(includesourceonerror bool) *Bulk { + r.values.Set("include_source_on_error", strconv.FormatBool(includesourceonerror)) + + return r +} + +// ListExecutedPipelines If `true`, the response will include the ingest pipelines that were run for +// each index or create. +// API name: list_executed_pipelines +func (r *Bulk) ListExecutedPipelines(listexecutedpipelines bool) *Bulk { + r.values.Set("list_executed_pipelines", strconv.FormatBool(listexecutedpipelines)) + + return r +} + +// Pipeline The pipeline identifier to use to preprocess incoming documents. 
+// If the index has a default ingest pipeline specified, setting the value to +// `_none` turns off the default ingest pipeline for this request. +// If a final pipeline is configured, it will always run regardless of the value // of this parameter. // API name: pipeline func (r *Bulk) Pipeline(pipeline string) *Bulk { @@ -337,8 +664,9 @@ func (r *Bulk) Pipeline(pipeline string) *Bulk { } // Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation -// visible to search, if `wait_for` then wait for a refresh to make this -// operation visible to search, if `false` do nothing with refreshes. +// visible to search. +// If `wait_for`, wait for a refresh to make this operation visible to search. +// If `false`, do nothing with refreshes. // Valid values: `true`, `false`, `wait_for`. // API name: refresh func (r *Bulk) Refresh(refresh refresh.Refresh) *Bulk { @@ -347,7 +675,7 @@ func (r *Bulk) Refresh(refresh refresh.Refresh) *Bulk { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value that is used to route operations to a specific shard. // API name: routing func (r *Bulk) Routing(routing string) *Bulk { r.values.Set("routing", routing) @@ -355,8 +683,8 @@ func (r *Bulk) Routing(routing string) *Bulk { return r } -// Source_ `true` or `false` to return the `_source` field or not, or a list of fields -// to return. +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or +// contains a list of fields to return. // API name: _source func (r *Bulk) Source_(sourceconfigparam string) *Bulk { r.values.Set("_source", sourceconfigparam) @@ -365,6 +693,9 @@ func (r *Bulk) Source_(sourceconfigparam string) *Bulk { } // SourceExcludes_ A comma-separated list of source fields to exclude from the response. +// You can also use this parameter to exclude fields from the subset specified +// in `_source_includes` query parameter. 
+// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_excludes func (r *Bulk) SourceExcludes_(fields ...string) *Bulk { r.values.Set("_source_excludes", strings.Join(fields, ",")) @@ -373,6 +704,10 @@ func (r *Bulk) SourceExcludes_(fields ...string) *Bulk { } // SourceIncludes_ A comma-separated list of source fields to include in the response. +// If this parameter is specified, only these source fields are returned. +// You can exclude fields from this subset using the `_source_excludes` query +// parameter. +// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_includes func (r *Bulk) SourceIncludes_(fields ...string) *Bulk { r.values.Set("_source_includes", strings.Join(fields, ",")) @@ -380,8 +715,11 @@ func (r *Bulk) SourceIncludes_(fields ...string) *Bulk { return r } -// Timeout Period each action waits for the following operations: automatic index -// creation, dynamic mapping updates, waiting for active shards. +// Timeout The period each action waits for the following operations: automatic index +// creation, dynamic mapping updates, and waiting for active shards. +// The default is `1m` (one minute), which guarantees Elasticsearch waits for at +// least the timeout before failing. +// The actual wait time could be longer, particularly when multiple waits occur. // API name: timeout func (r *Bulk) Timeout(duration string) *Bulk { r.values.Set("timeout", duration) @@ -391,8 +729,9 @@ func (r *Bulk) Timeout(duration string) *Bulk { // WaitForActiveShards The number of shard copies that must be active before proceeding with the // operation. -// Set to all or any positive integer up to the total number of shards in the +// Set to `all` or any positive integer up to the total number of shards in the // index (`number_of_replicas+1`). +// The default is `1`, which waits for each primary shard to be active. 
// API name: wait_for_active_shards func (r *Bulk) WaitForActiveShards(waitforactiveshards string) *Bulk { r.values.Set("wait_for_active_shards", waitforactiveshards) @@ -400,7 +739,7 @@ func (r *Bulk) WaitForActiveShards(waitforactiveshards string) *Bulk { return r } -// RequireAlias If `true`, the request’s actions must target an index alias. +// RequireAlias If `true`, the request's actions must target an index alias. // API name: require_alias func (r *Bulk) RequireAlias(requirealias bool) *Bulk { r.values.Set("require_alias", strconv.FormatBool(requirealias)) @@ -408,6 +747,15 @@ func (r *Bulk) RequireAlias(requirealias bool) *Bulk { return r } +// RequireDataStream If `true`, the request's actions must target a data stream (existing or to be +// created). +// API name: require_data_stream +func (r *Bulk) RequireDataStream(requiredatastream bool) *Bulk { + r.values.Set("require_data_stream", strconv.FormatBool(requiredatastream)) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/core/bulk/request.go b/typedapi/core/bulk/request.go index 738c440ca7..5b7d01b4cd 100644 --- a/typedapi/core/bulk/request.go +++ b/typedapi/core/bulk/request.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package bulk // Request holds the request body struct for the package bulk // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/bulk/BulkRequest.ts#L32-L104 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/bulk/BulkRequest.ts#L32-L247 type Request = []any diff --git a/typedapi/core/bulk/response.go b/typedapi/core/bulk/response.go index e4c7b3777f..f7fc5428ee 100644 --- a/typedapi/core/bulk/response.go +++ b/typedapi/core/bulk/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package bulk @@ -27,12 +27,18 @@ import ( // Response holds the response body struct for the package bulk // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/bulk/BulkResponse.ts#L24-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/bulk/BulkResponse.ts#L24-L45 type Response struct { - Errors bool `json:"errors"` - IngestTook *int64 `json:"ingest_took,omitempty"` - Items []map[operationtype.OperationType]types.ResponseItem `json:"items"` - Took int64 `json:"took"` + + // Errors If `true`, one or more of the operations in the bulk request did not complete + // successfully. 
+ Errors bool `json:"errors"` + IngestTook *int64 `json:"ingest_took,omitempty"` + // Items The result of each operation in the bulk request, in the order they were + // submitted. + Items []map[operationtype.OperationType]types.ResponseItem `json:"items"` + // Took The length of time, in milliseconds, it took to process the bulk request. + Took int64 `json:"took"` } // NewResponse returns a Response diff --git a/typedapi/core/clearscroll/clear_scroll.go b/typedapi/core/clearscroll/clear_scroll.go index af70f2f82b..a97d4bb0b0 100644 --- a/typedapi/core/clearscroll/clear_scroll.go +++ b/typedapi/core/clearscroll/clear_scroll.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Clears the search context and results for a scrolling search. +// Clear a scrolling search. +// Clear the search context and results for a scrolling search. package clearscroll import ( @@ -75,7 +76,8 @@ func NewClearScrollFunc(tp elastictransport.Interface) NewClearScroll { } } -// Clears the search context and results for a scrolling search. +// Clear a scrolling search. +// Clear the search context and results for a scrolling search. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html func New(tp elastictransport.Interface) *ClearScroll { @@ -85,8 +87,6 @@ func New(tp elastictransport.Interface) *ClearScroll { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -374,10 +374,15 @@ func (r *ClearScroll) Pretty(pretty bool) *ClearScroll { return r } -// ScrollId Scroll IDs to clear. +// The scroll IDs to clear. // To clear all scroll IDs, use `_all`. 
// API name: scroll_id func (r *ClearScroll) ScrollId(scrollids ...string) *ClearScroll { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ScrollId = scrollids return r diff --git a/typedapi/core/clearscroll/request.go b/typedapi/core/clearscroll/request.go index f1e93b247b..18316a82f3 100644 --- a/typedapi/core/clearscroll/request.go +++ b/typedapi/core/clearscroll/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clearscroll @@ -30,10 +30,10 @@ import ( // Request holds the request body struct for the package clearscroll // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/clear_scroll/ClearScrollRequest.ts#L23-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/clear_scroll/ClearScrollRequest.ts#L23-L61 type Request struct { - // ScrollId Scroll IDs to clear. + // ScrollId The scroll IDs to clear. // To clear all scroll IDs, use `_all`. ScrollId []string `json:"scroll_id,omitempty"` } diff --git a/typedapi/core/clearscroll/response.go b/typedapi/core/clearscroll/response.go index ec5d214e61..bfb94cecb4 100644 --- a/typedapi/core/clearscroll/response.go +++ b/typedapi/core/clearscroll/response.go @@ -16,15 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clearscroll // Response holds the response body struct for the package clearscroll // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/clear_scroll/ClearScrollResponse.ts#L22-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/clear_scroll/ClearScrollResponse.ts#L22-L43 type Response struct { - NumFreed int `json:"num_freed"` + + // NumFreed The number of scrolling search requests cleared. + NumFreed int `json:"num_freed"` + // Succeeded If `true`, the request succeeded. + // This does not indicate whether any scrolling search requests were cleared. Succeeded bool `json:"succeeded"` } diff --git a/typedapi/core/closepointintime/close_point_in_time.go b/typedapi/core/closepointintime/close_point_in_time.go index 66eef38600..6ef50c5cdc 100644 --- a/typedapi/core/closepointintime/close_point_in_time.go +++ b/typedapi/core/closepointintime/close_point_in_time.go @@ -16,9 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Closes a point-in-time. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Close a point in time. +// A point in time must be opened explicitly before being used in search +// requests. +// The `keep_alive` parameter tells Elasticsearch how long it should persist. +// A point in time is automatically closed when the `keep_alive` period has +// elapsed. 
+// However, keeping points in time has a cost; close them as soon as they are no +// longer required for search requests. package closepointintime import ( @@ -73,7 +80,14 @@ func NewClosePointInTimeFunc(tp elastictransport.Interface) NewClosePointInTime } } -// Closes a point-in-time. +// Close a point in time. +// A point in time must be opened explicitly before being used in search +// requests. +// The `keep_alive` parameter tells Elasticsearch how long it should persist. +// A point in time is automatically closed when the `keep_alive` period has +// elapsed. +// However, keeping points in time has a cost; close them as soon as they are no +// longer required for search requests. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html func New(tp elastictransport.Interface) *ClosePointInTime { @@ -83,8 +97,6 @@ func New(tp elastictransport.Interface) *ClosePointInTime { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -370,9 +382,14 @@ func (r *ClosePointInTime) Pretty(pretty bool) *ClosePointInTime { return r } -// Id The ID of the point-in-time. +// The ID of the point-in-time. // API name: id func (r *ClosePointInTime) Id(id string) *ClosePointInTime { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Id = id return r diff --git a/typedapi/core/closepointintime/request.go b/typedapi/core/closepointintime/request.go index 5958a60ecd..388a9c7a33 100644 --- a/typedapi/core/closepointintime/request.go +++ b/typedapi/core/closepointintime/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package closepointintime @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package closepointintime // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/close_point_in_time/ClosePointInTimeRequest.ts#L23-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/close_point_in_time/ClosePointInTimeRequest.ts#L23-L48 type Request struct { // Id The ID of the point-in-time. diff --git a/typedapi/core/closepointintime/response.go b/typedapi/core/closepointintime/response.go index dc4658de1a..b68344d044 100644 --- a/typedapi/core/closepointintime/response.go +++ b/typedapi/core/closepointintime/response.go @@ -16,15 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package closepointintime // Response holds the response body struct for the package closepointintime // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/close_point_in_time/ClosePointInTimeResponse.ts#L22-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/close_point_in_time/ClosePointInTimeResponse.ts#L22-L42 type Response struct { - NumFreed int `json:"num_freed"` + + // NumFreed The number of search contexts that were successfully closed. 
+ NumFreed int `json:"num_freed"` + // Succeeded If `true`, all search contexts associated with the point-in-time ID were + // successfully closed. Succeeded bool `json:"succeeded"` } diff --git a/typedapi/core/count/count.go b/typedapi/core/count/count.go index ebd7a91d5f..20884ac691 100644 --- a/typedapi/core/count/count.go +++ b/typedapi/core/count/count.go @@ -16,9 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns number of documents matching a query. +// Count search results. +// Get the number of documents matching a query. +// +// The query can be provided either by using a simple query string as a +// parameter, or by defining Query DSL within the request body. +// The query is optional. When no query is provided, the API uses `match_all` to +// count all the documents. +// +// The count API supports multi-target syntax. You can run a single count API +// search across multiple data streams and indices. +// +// The operation is broadcast across all shards. +// For each shard ID group, a replica is chosen and the search is run against +// it. +// This means that replicas increase the scalability of the count. package count import ( @@ -81,7 +95,21 @@ func NewCountFunc(tp elastictransport.Interface) NewCount { } } -// Returns number of documents matching a query. +// Count search results. +// Get the number of documents matching a query. +// +// The query can be provided either by using a simple query string as a +// parameter, or by defining Query DSL within the request body. +// The query is optional. When no query is provided, the API uses `match_all` to +// count all the documents. +// +// The count API supports multi-target syntax. 
You can run a single count API +// search across multiple data streams and indices. +// +// The operation is broadcast across all shards. +// For each shard ID group, a replica is chosen and the search is run against +// it. +// This means that replicas increase the scalability of the count. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html func New(tp elastictransport.Interface) *Count { @@ -91,8 +119,6 @@ func New(tp elastictransport.Interface) *Count { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -309,8 +335,8 @@ func (r *Count) Header(key, value string) *Count { return r } -// Index Comma-separated list of data streams, indices, and aliases to search. -// Supports wildcards (`*`). +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). // To search all data streams and indices, omit this parameter or use `*` or // `_all`. // API Name: index @@ -324,6 +350,8 @@ func (r *Count) Index(index string) *Count { // AllowNoIndices If `false`, the request returns an error if any wildcard expression, index // alias, or `_all` value targets only missing or closed indices. // This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. // API name: allow_no_indices func (r *Count) AllowNoIndices(allownoindices bool) *Count { r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) @@ -331,8 +359,8 @@ func (r *Count) AllowNoIndices(allownoindices bool) *Count { return r } -// Analyzer Analyzer to use for the query string. -// This parameter can only be used when the `q` query string parameter is +// Analyzer The analyzer to use for the query string. 
+// This parameter can be used only when the `q` query string parameter is // specified. // API name: analyzer func (r *Count) Analyzer(analyzer string) *Count { @@ -342,7 +370,7 @@ func (r *Count) Analyzer(analyzer string) *Count { } // AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. -// This parameter can only be used when the `q` query string parameter is +// This parameter can be used only when the `q` query string parameter is // specified. // API name: analyze_wildcard func (r *Count) AnalyzeWildcard(analyzewildcard bool) *Count { @@ -352,7 +380,7 @@ func (r *Count) AnalyzeWildcard(analyzewildcard bool) *Count { } // DefaultOperator The default operator for query string query: `AND` or `OR`. -// This parameter can only be used when the `q` query string parameter is +// This parameter can be used only when the `q` query string parameter is // specified. // API name: default_operator func (r *Count) DefaultOperator(defaultoperator operator.Operator) *Count { @@ -361,8 +389,9 @@ func (r *Count) DefaultOperator(defaultoperator operator.Operator) *Count { return r } -// Df Field to use as default where no field prefix is given in the query string. -// This parameter can only be used when the `q` query string parameter is +// Df The field to use as a default when no field prefix is given in the query +// string. +// This parameter can be used only when the `q` query string parameter is // specified. // API name: df func (r *Count) Df(df string) *Count { @@ -371,10 +400,10 @@ func (r *Count) Df(df string) *Count { return r } -// ExpandWildcards Type of index that wildcard patterns can match. +// ExpandWildcards The type of index that wildcard patterns can match. // If the request can target data streams, this argument determines whether // wildcard expressions match hidden data streams. -// Supports comma-separated values, such as `open,hidden`. +// It supports comma-separated values, such as `open,hidden`. 
// API name: expand_wildcards func (r *Count) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Count { tmp := []string{} @@ -386,7 +415,7 @@ func (r *Count) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard return r } -// IgnoreThrottled If `true`, concrete, expanded or aliased indices are ignored when frozen. +// IgnoreThrottled If `true`, concrete, expanded, or aliased indices are ignored when frozen. // API name: ignore_throttled func (r *Count) IgnoreThrottled(ignorethrottled bool) *Count { r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) @@ -405,6 +434,8 @@ func (r *Count) IgnoreUnavailable(ignoreunavailable bool) *Count { // Lenient If `true`, format-based query failures (such as providing text to a numeric // field) in the query string will be ignored. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: lenient func (r *Count) Lenient(lenient bool) *Count { r.values.Set("lenient", strconv.FormatBool(lenient)) @@ -412,8 +443,8 @@ func (r *Count) Lenient(lenient bool) *Count { return r } -// MinScore Sets the minimum `_score` value that documents must have to be included in -// the result. +// MinScore The minimum `_score` value that documents must have to be included in the +// result. // API name: min_score func (r *Count) MinScore(minscore string) *Count { r.values.Set("min_score", minscore) @@ -421,8 +452,8 @@ func (r *Count) MinScore(minscore string) *Count { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// By default, it is random. // API name: preference func (r *Count) Preference(preference string) *Count { r.values.Set("preference", preference) @@ -430,7 +461,7 @@ func (r *Count) Preference(preference string) *Count { return r } -// Routing Custom value used to route operations to a specific shard. 
+// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *Count) Routing(routing string) *Count { r.values.Set("routing", routing) @@ -438,9 +469,15 @@ func (r *Count) Routing(routing string) *Count { return r } -// TerminateAfter Maximum number of documents to collect for each shard. +// TerminateAfter The maximum number of documents to collect for each shard. // If a query reaches this limit, Elasticsearch terminates the query early. // Elasticsearch collects documents before sorting. +// +// IMPORTANT: Use with caution. +// Elasticsearch applies this parameter to each shard handling the request. +// When possible, let Elasticsearch perform early termination automatically. +// Avoid specifying this parameter for requests that target data streams with +// backing indices across multiple data tiers. // API name: terminate_after func (r *Count) TerminateAfter(terminateafter string) *Count { r.values.Set("terminate_after", terminateafter) @@ -448,7 +485,8 @@ func (r *Count) TerminateAfter(terminateafter string) *Count { return r } -// Q Query in the Lucene query string syntax. +// Q The query in Lucene query string syntax. This parameter cannot be used with a +// request body. // API name: q func (r *Count) Q(q string) *Count { r.values.Set("q", q) @@ -500,11 +538,16 @@ func (r *Count) Pretty(pretty bool) *Count { return r } -// Query Defines the search definition using the Query DSL. +// Defines the search query using Query DSL. A request body query cannot be used +// with the `q` query string parameter. 
// API name: query -func (r *Count) Query(query *types.Query) *Count { +func (r *Count) Query(query types.QueryVariant) *Count { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } diff --git a/typedapi/core/count/request.go b/typedapi/core/count/request.go index c9e14e95f9..b7212f4834 100644 --- a/typedapi/core/count/request.go +++ b/typedapi/core/count/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package count @@ -29,10 +29,11 @@ import ( // Request holds the request body struct for the package count // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/count/CountRequest.ts#L26-L120 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/count/CountRequest.ts#L26-L154 type Request struct { - // Query Defines the search definition using the Query DSL. + // Query Defines the search query using Query DSL. A request body query cannot be used + // with the `q` query string parameter. Query *types.Query `json:"query,omitempty"` } diff --git a/typedapi/core/count/response.go b/typedapi/core/count/response.go index 99365df1f3..84e292f7c9 100644 --- a/typedapi/core/count/response.go +++ b/typedapi/core/count/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package count @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package count // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/count/CountResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/count/CountResponse.ts#L23-L25 type Response struct { Count int64 `json:"count"` Shards_ types.ShardStatistics `json:"_shards"` diff --git a/typedapi/core/create/create.go b/typedapi/core/create/create.go index 67152166ec..2e37314cb1 100644 --- a/typedapi/core/create/create.go +++ b/typedapi/core/create/create.go @@ -16,13 +16,134 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Index a document. -// Adds a JSON document to the specified data stream or index and makes it -// searchable. -// If the target is an index and the document already exists, the request -// updates the document and increments its version. +// Create a new document in the index. +// +// You can index a new JSON document with the `//_doc/` or +// `//_create/<_id>` APIs +// Using `_create` guarantees that the document is indexed only if it does not +// already exist. +// It returns a 409 response when a document with a same ID already exists in +// the index. +// To update an existing document, you must use the `//_doc/` API. 
+// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To add a document using the `PUT //_create/<_id>` or `POST +// //_create/<_id>` request formats, you must have the `create_doc`, +// `create`, `index`, or `write` index privilege. +// * To automatically create a data stream or index with this API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// **Automatically create data streams and indices** +// +// If the request's target doesn't exist and matches an index template with a +// `data_stream` definition, the index operation automatically creates the data +// stream. +// +// If the target doesn't exist and doesn't match a data stream template, the +// operation automatically creates the index and applies any matching index +// templates. +// +// NOTE: Elasticsearch includes several built-in index templates. To avoid +// naming collisions with these templates, refer to index pattern documentation. +// +// If no mapping exists, the index operation creates a dynamic mapping. +// By default, new fields and objects are automatically added to the mapping if +// needed. +// +// Automatic index creation is controlled by the `action.auto_create_index` +// setting. +// If it is `true`, any index can be created automatically. +// You can modify this setting to explicitly allow or block automatic creation +// of indices that match specified patterns or set it to `false` to turn off +// automatic index creation entirely. +// Specify a comma-separated list of patterns you want to allow or prefix each +// pattern with `+` or `-` to indicate whether it should be allowed or blocked. +// When a list is specified, the default behaviour is to disallow. 
+// +// NOTE: The `action.auto_create_index` setting affects the automatic creation +// of indices only. +// It does not affect the creation of data streams. +// +// **Routing** +// +// By default, shard placement — or routing — is controlled by using a hash of +// the document's ID value. +// For more explicit control, the value fed into the hash function used by the +// router can be directly specified on a per-operation basis using the `routing` +// parameter. +// +// When setting up explicit mapping, you can also use the `_routing` field to +// direct the index operation to extract the routing value from the document +// itself. +// This does come at the (very minimal) cost of an additional document parsing +// pass. +// If the `_routing` mapping is defined and set to be required, the index +// operation will fail if no routing value is provided or extracted. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Distributed** +// +// The index operation is directed to the primary shard based on its route and +// performed on the actual node containing this shard. +// After the primary shard completes the operation, if needed, the update is +// distributed to applicable replicas. +// +// **Active shards** +// +// To improve the resiliency of writes to the system, indexing operations can be +// configured to wait for a certain number of active shard copies before +// proceeding with the operation. +// If the requisite number of active shard copies are not available, then the +// write operation must wait and retry, until either the requisite shard copies +// have started or a timeout occurs. +// By default, write operations only wait for the primary shards to be active +// before proceeding (that is to say `wait_for_active_shards` is `1`). 
+// This default can be overridden in the index settings dynamically by setting +// `index.write.wait_for_active_shards`. +// To alter this behavior per operation, use the `wait_for_active_shards +// request` parameter. +// +// Valid values are all or any positive integer up to the total number of +// configured copies per shard in the index (which is `number_of_replicas`+1). +// Specifying a negative value or a number greater than the number of shard +// copies will throw an error. +// +// For example, suppose you have a cluster of three nodes, A, B, and C and you +// create an index index with the number of replicas set to 3 (resulting in 4 +// shard copies, one more copy than there are nodes). +// If you attempt an indexing operation, by default the operation will only +// ensure the primary copy of each shard is available before proceeding. +// This means that even if B and C went down and A hosted the primary shard +// copies, the indexing operation would still proceed with only one copy of the +// data. +// If `wait_for_active_shards` is set on the request to `3` (and all three nodes +// are up), the indexing operation will require 3 active shard copies before +// proceeding. +// This requirement should be met because there are 3 active nodes in the +// cluster, each one holding a copy of the shard. +// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is +// the same in this situation), the indexing operation will not proceed as you +// do not have all 4 copies of each shard active in the index. +// The operation will timeout unless a new node is brought up in the cluster to +// host the fourth copy of the shard. +// +// It is important to note that this setting greatly reduces the chances of the +// write operation not writing to the requisite number of shard copies, but it +// does not completely eliminate the possibility, because this check occurs +// before the write operation starts. 
+// After the write operation is underway, it is still possible for replication +// to fail on any number of shard copies but still succeed on the primary. +// The `_shards` section of the API response reveals the number of shard copies +// on which replication succeeded and failed. package create import ( @@ -39,6 +160,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/optype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) @@ -92,11 +214,132 @@ func NewCreateFunc(tp elastictransport.Interface) NewCreate { } } -// Index a document. -// Adds a JSON document to the specified data stream or index and makes it -// searchable. -// If the target is an index and the document already exists, the request -// updates the document and increments its version. +// Create a new document in the index. +// +// You can index a new JSON document with the `//_doc/` or +// `//_create/<_id>` APIs +// Using `_create` guarantees that the document is indexed only if it does not +// already exist. +// It returns a 409 response when a document with a same ID already exists in +// the index. +// To update an existing document, you must use the `//_doc/` API. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To add a document using the `PUT //_create/<_id>` or `POST +// //_create/<_id>` request formats, you must have the `create_doc`, +// `create`, `index`, or `write` index privilege. +// * To automatically create a data stream or index with this API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. 
+// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// **Automatically create data streams and indices** +// +// If the request's target doesn't exist and matches an index template with a +// `data_stream` definition, the index operation automatically creates the data +// stream. +// +// If the target doesn't exist and doesn't match a data stream template, the +// operation automatically creates the index and applies any matching index +// templates. +// +// NOTE: Elasticsearch includes several built-in index templates. To avoid +// naming collisions with these templates, refer to index pattern documentation. +// +// If no mapping exists, the index operation creates a dynamic mapping. +// By default, new fields and objects are automatically added to the mapping if +// needed. +// +// Automatic index creation is controlled by the `action.auto_create_index` +// setting. +// If it is `true`, any index can be created automatically. +// You can modify this setting to explicitly allow or block automatic creation +// of indices that match specified patterns or set it to `false` to turn off +// automatic index creation entirely. +// Specify a comma-separated list of patterns you want to allow or prefix each +// pattern with `+` or `-` to indicate whether it should be allowed or blocked. +// When a list is specified, the default behaviour is to disallow. +// +// NOTE: The `action.auto_create_index` setting affects the automatic creation +// of indices only. +// It does not affect the creation of data streams. +// +// **Routing** +// +// By default, shard placement — or routing — is controlled by using a hash of +// the document's ID value. +// For more explicit control, the value fed into the hash function used by the +// router can be directly specified on a per-operation basis using the `routing` +// parameter. 
+//
+// When setting up explicit mapping, you can also use the `_routing` field to
+// direct the index operation to extract the routing value from the document
+// itself.
+// This does come at the (very minimal) cost of an additional document parsing
+// pass.
+// If the `_routing` mapping is defined and set to be required, the index
+// operation will fail if no routing value is provided or extracted.
+//
+// NOTE: Data streams do not support custom routing unless they were created
+// with the `allow_custom_routing` setting enabled in the template.
+//
+// **Distributed**
+//
+// The index operation is directed to the primary shard based on its route and
+// performed on the actual node containing this shard.
+// After the primary shard completes the operation, if needed, the update is
+// distributed to applicable replicas.
+//
+// **Active shards**
+//
+// To improve the resiliency of writes to the system, indexing operations can be
+// configured to wait for a certain number of active shard copies before
+// proceeding with the operation.
+// If the requisite number of active shard copies are not available, then the
+// write operation must wait and retry, until either the requisite shard copies
+// have started or a timeout occurs.
+// By default, write operations only wait for the primary shards to be active
+// before proceeding (that is to say `wait_for_active_shards` is `1`).
+// This default can be overridden in the index settings dynamically by setting
+// `index.write.wait_for_active_shards`.
+// To alter this behavior per operation, use the `wait_for_active_shards`
+// request parameter.
+//
+// Valid values are all or any positive integer up to the total number of
+// configured copies per shard in the index (which is `number_of_replicas`+1).
+// Specifying a negative value or a number greater than the number of shard
+// copies will throw an error.
+// +// For example, suppose you have a cluster of three nodes, A, B, and C and you +// create an index index with the number of replicas set to 3 (resulting in 4 +// shard copies, one more copy than there are nodes). +// If you attempt an indexing operation, by default the operation will only +// ensure the primary copy of each shard is available before proceeding. +// This means that even if B and C went down and A hosted the primary shard +// copies, the indexing operation would still proceed with only one copy of the +// data. +// If `wait_for_active_shards` is set on the request to `3` (and all three nodes +// are up), the indexing operation will require 3 active shard copies before +// proceeding. +// This requirement should be met because there are 3 active nodes in the +// cluster, each one holding a copy of the shard. +// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is +// the same in this situation), the indexing operation will not proceed as you +// do not have all 4 copies of each shard active in the index. +// The operation will timeout unless a new node is brought up in the cluster to +// host the fourth copy of the shard. +// +// It is important to note that this setting greatly reduces the chances of the +// write operation not writing to the requisite number of shard copies, but it +// does not completely eliminate the possibility, because this check occurs +// before the write operation starts. +// After the write operation is underway, it is still possible for replication +// to fail on any number of shard copies but still succeed on the primary. +// The `_shards` section of the API response reveals the number of shard copies +// on which replication succeeded and failed. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html func New(tp elastictransport.Interface) *Create { @@ -106,8 +349,6 @@ func New(tp elastictransport.Interface) *Create { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -332,7 +573,9 @@ func (r *Create) Header(key, value string) *Create { return r } -// Id Unique identifier for the document. +// Id A unique identifier for the document. +// To automatically generate a document ID, use the `POST //_doc/` +// request format. // API Name: id func (r *Create) _id(id string) *Create { r.paramSet |= idMask @@ -341,11 +584,11 @@ func (r *Create) _id(id string) *Create { return r } -// Index Name of the data stream or index to target. -// If the target doesn’t exist and matches the name or wildcard (`*`) pattern of +// Index The name of the data stream or index to target. +// If the target doesn't exist and matches the name or wildcard (`*`) pattern of // an index template with a `data_stream` definition, this request creates the // data stream. -// If the target doesn’t exist and doesn’t match a data stream template, this +// If the target doesn't exist and doesn’t match a data stream template, this // request creates the index. // API Name: index func (r *Create) _index(index string) *Create { @@ -355,10 +598,50 @@ func (r *Create) _index(index string) *Create { return r } -// Pipeline ID of the pipeline to use to preprocess incoming documents. -// If the index has a default ingest pipeline specified, then setting the value -// to `_none` disables the default ingest pipeline for this request. -// If a final pipeline is configured it will always run, regardless of the value +// IfPrimaryTerm Only perform the operation if the document has this primary term. 
+// API name: if_primary_term
+func (r *Create) IfPrimaryTerm(ifprimaryterm string) *Create {
+ r.values.Set("if_primary_term", ifprimaryterm)
+
+ return r
+}
+
+// IfSeqNo Only perform the operation if the document has this sequence number.
+// API name: if_seq_no
+func (r *Create) IfSeqNo(sequencenumber string) *Create {
+ r.values.Set("if_seq_no", sequencenumber)
+
+ return r
+}
+
+// IncludeSourceOnError True or false if to include the document source in the error message in case
+// of parsing errors.
+// API name: include_source_on_error
+func (r *Create) IncludeSourceOnError(includesourceonerror bool) *Create {
+ r.values.Set("include_source_on_error", strconv.FormatBool(includesourceonerror))
+
+ return r
+}
+
+// OpType Set to `create` to only index the document if it does not already exist (put
+// if absent).
+// If a document with the specified `_id` already exists, the indexing operation
+// will fail.
+// The behavior is the same as using the `/_create` endpoint.
+// If a document ID is specified, this parameter defaults to `index`.
+// Otherwise, it defaults to `create`.
+// If the request targets a data stream, an `op_type` of `create` is required.
+// API name: op_type
+func (r *Create) OpType(optype optype.OpType) *Create {
+ r.values.Set("op_type", optype.String())
+
+ return r
+}
+
+// Pipeline The ID of the pipeline to use to preprocess incoming documents.
+// If the index has a default ingest pipeline specified, setting the value to
+// `_none` turns off the default ingest pipeline for this request.
+// If a final pipeline is configured, it will always run regardless of the value
// of this parameter.
// API name: pipeline func (r *Create) Pipeline(pipeline string) *Create { @@ -368,9 +651,10 @@ func (r *Create) Pipeline(pipeline string) *Create { } // Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation -// visible to search, if `wait_for` then wait for a refresh to make this -// operation visible to search, if `false` do nothing with refreshes. -// Valid values: `true`, `false`, `wait_for`. +// visible to search. +// If `wait_for`, it waits for a refresh to make this operation visible to +// search. +// If `false`, it does nothing with refreshes. // API name: refresh func (r *Create) Refresh(refresh refresh.Refresh) *Create { r.values.Set("refresh", refresh.String()) @@ -378,7 +662,24 @@ func (r *Create) Refresh(refresh refresh.Refresh) *Create { return r } -// Routing Custom value used to route operations to a specific shard. +// RequireAlias If `true`, the destination must be an index alias. +// API name: require_alias +func (r *Create) RequireAlias(requirealias bool) *Create { + r.values.Set("require_alias", strconv.FormatBool(requirealias)) + + return r +} + +// RequireDataStream If `true`, the request's actions must target a data stream (existing or to be +// created). +// API name: require_data_stream +func (r *Create) RequireDataStream(requiredatastream bool) *Create { + r.values.Set("require_data_stream", strconv.FormatBool(requiredatastream)) + + return r +} + +// Routing A custom value that is used to route operations to a specific shard. // API name: routing func (r *Create) Routing(routing string) *Create { r.values.Set("routing", routing) @@ -386,8 +687,18 @@ func (r *Create) Routing(routing string) *Create { return r } -// Timeout Period the request waits for the following operations: automatic index +// Timeout The period the request waits for the following operations: automatic index // creation, dynamic mapping updates, waiting for active shards. 
+// Elasticsearch waits for at least the specified timeout period before failing. +// The actual wait time could be longer, particularly when multiple waits occur. +// +// This parameter is useful for situations where the primary shard assigned to +// perform the operation might not be available when the operation runs. +// Some reasons for this might be that the primary shard is currently recovering +// from a gateway or undergoing relocation. +// By default, the operation will wait on the primary shard to become available +// for at least 1 minute before failing and responding with an error. +// The actual wait time could be longer, particularly when multiple waits occur. // API name: timeout func (r *Create) Timeout(duration string) *Create { r.values.Set("timeout", duration) @@ -395,9 +706,8 @@ func (r *Create) Timeout(duration string) *Create { return r } -// Version Explicit version number for concurrency control. -// The specified version must match the current version of the document for the -// request to succeed. +// Version The explicit version number for concurrency control. +// It must be a non-negative long number. // API name: version func (r *Create) Version(versionnumber string) *Create { r.values.Set("version", versionnumber) @@ -405,7 +715,7 @@ func (r *Create) Version(versionnumber string) *Create { return r } -// VersionType Specific version type: `external`, `external_gte`. +// VersionType The version type. // API name: version_type func (r *Create) VersionType(versiontype versiontype.VersionType) *Create { r.values.Set("version_type", versiontype.String()) @@ -415,8 +725,9 @@ func (r *Create) VersionType(versiontype versiontype.VersionType) *Create { // WaitForActiveShards The number of shard copies that must be active before proceeding with the // operation. -// Set to `all` or any positive integer up to the total number of shards in the -// index (`number_of_replicas+1`). 
+// You can set it to `all` or any positive integer up to the total number of +// shards in the index (`number_of_replicas+1`). +// The default value of `1` means it waits for each primary shard to be active. // API name: wait_for_active_shards func (r *Create) WaitForActiveShards(waitforactiveshards string) *Create { r.values.Set("wait_for_active_shards", waitforactiveshards) diff --git a/typedapi/core/create/request.go b/typedapi/core/create/request.go index 0b18fd1def..bf698617a0 100644 --- a/typedapi/core/create/request.go +++ b/typedapi/core/create/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package create @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/create/CreateRequest.ts#L32-L96 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/create/CreateRequest.ts#L35-L221 type Request = json.RawMessage // NewRequest returns a Request diff --git a/typedapi/core/create/response.go b/typedapi/core/create/response.go index 161a74d4be..94c17c5687 100644 --- a/typedapi/core/create/response.go +++ b/typedapi/core/create/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package create @@ -27,16 +27,25 @@ import ( // Response holds the response body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/create/CreateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/create/CreateResponse.ts#L22-L24 type Response struct { - ForcedRefresh *bool `json:"forced_refresh,omitempty"` - Id_ string `json:"_id"` - Index_ string `json:"_index"` - PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - Result result.Result `json:"result"` - SeqNo_ *int64 `json:"_seq_no,omitempty"` - Shards_ types.ShardStatistics `json:"_shards"` - Version_ int64 `json:"_version"` + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + // Id_ The unique identifier for the added document. + Id_ string `json:"_id"` + // Index_ The name of the index the document was added to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result The result of the indexing operation: `created` or `updated`. + Result result.Result `json:"result"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Information about the replication process of the operation. + Shards_ types.ShardStatistics `json:"_shards"` + // Version_ The document version, which is incremented each time the document is updated. 
+ Version_ int64 `json:"_version"` } // NewResponse returns a Response diff --git a/typedapi/core/delete/delete.go b/typedapi/core/delete/delete.go index 81ed5e83a2..15921ecf83 100644 --- a/typedapi/core/delete/delete.go +++ b/typedapi/core/delete/delete.go @@ -16,10 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete a document. -// Removes a JSON document from the specified index. +// +// Remove a JSON document from the specified index. +// +// NOTE: You cannot send deletion requests directly to a data stream. +// To delete a document in a data stream, you must target the backing index +// containing the document. +// +// **Optimistic concurrency control** +// +// Delete operations can be made conditional and only be performed if the last +// modification to the document was assigned the sequence number and primary +// term specified by the `if_seq_no` and `if_primary_term` parameters. +// If a mismatch is detected, the operation will result in a +// `VersionConflictException` and a status code of `409`. +// +// **Versioning** +// +// Each document indexed is versioned. +// When deleting a document, the version can be specified to make sure the +// relevant document you are trying to delete is actually being deleted and it +// has not changed in the meantime. +// Every write operation run on a document, deletes included, causes its version +// to be incremented. +// The version number of a deleted document remains available for a short time +// after deletion to allow for control of concurrent operations. +// The length of time for which a deleted document's version remains available +// is determined by the `index.gc_deletes` index setting. 
+// +// **Routing** +// +// If routing is used during indexing, the routing value also needs to be +// specified to delete a document. +// +// If the `_routing` mapping is set to `required` and no routing value is +// specified, the delete API throws a `RoutingMissingException` and rejects the +// request. +// +// For example: +// +// ``` +// DELETE /my-index-000001/_doc/1?routing=shard-1 +// ``` +// +// This request deletes the document with ID 1, but it is routed based on the +// user. +// The document is not deleted if the correct routing is not specified. +// +// **Distributed** +// +// The delete operation gets hashed into a specific shard ID. +// It then gets redirected into the primary shard within that ID group and +// replicated (if needed) to shard replicas within that ID group. package delete import ( @@ -86,7 +137,58 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } // Delete a document. -// Removes a JSON document from the specified index. +// +// Remove a JSON document from the specified index. +// +// NOTE: You cannot send deletion requests directly to a data stream. +// To delete a document in a data stream, you must target the backing index +// containing the document. +// +// **Optimistic concurrency control** +// +// Delete operations can be made conditional and only be performed if the last +// modification to the document was assigned the sequence number and primary +// term specified by the `if_seq_no` and `if_primary_term` parameters. +// If a mismatch is detected, the operation will result in a +// `VersionConflictException` and a status code of `409`. +// +// **Versioning** +// +// Each document indexed is versioned. +// When deleting a document, the version can be specified to make sure the +// relevant document you are trying to delete is actually being deleted and it +// has not changed in the meantime. +// Every write operation run on a document, deletes included, causes its version +// to be incremented. 
+// The version number of a deleted document remains available for a short time +// after deletion to allow for control of concurrent operations. +// The length of time for which a deleted document's version remains available +// is determined by the `index.gc_deletes` index setting. +// +// **Routing** +// +// If routing is used during indexing, the routing value also needs to be +// specified to delete a document. +// +// If the `_routing` mapping is set to `required` and no routing value is +// specified, the delete API throws a `RoutingMissingException` and rejects the +// request. +// +// For example: +// +// ``` +// DELETE /my-index-000001/_doc/1?routing=shard-1 +// ``` +// +// This request deletes the document with ID 1, but it is routed based on the +// user. +// The document is not deleted if the correct routing is not specified. +// +// **Distributed** +// +// The delete operation gets hashed into a specific shard ID. +// It then gets redirected into the primary shard within that ID group and +// replicated (if needed) to shard replicas within that ID group. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html func New(tp elastictransport.Interface) *Delete { @@ -340,7 +442,7 @@ func (r *Delete) Header(key, value string) *Delete { return r } -// Id Unique identifier for the document. +// Id A unique identifier for the document. // API Name: id func (r *Delete) _id(id string) *Delete { r.paramSet |= idMask @@ -349,7 +451,7 @@ func (r *Delete) _id(id string) *Delete { return r } -// Index Name of the target index. +// Index The name of the target index. 
// API Name: index func (r *Delete) _index(index string) *Delete { r.paramSet |= indexMask @@ -375,9 +477,10 @@ func (r *Delete) IfSeqNo(sequencenumber string) *Delete { } // Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation -// visible to search, if `wait_for` then wait for a refresh to make this -// operation visible to search, if `false` do nothing with refreshes. -// Valid values: `true`, `false`, `wait_for`. +// visible to search. +// If `wait_for`, it waits for a refresh to make this operation visible to +// search. +// If `false`, it does nothing with refreshes. // API name: refresh func (r *Delete) Refresh(refresh refresh.Refresh) *Delete { r.values.Set("refresh", refresh.String()) @@ -385,7 +488,7 @@ func (r *Delete) Refresh(refresh refresh.Refresh) *Delete { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *Delete) Routing(routing string) *Delete { r.values.Set("routing", routing) @@ -393,7 +496,15 @@ func (r *Delete) Routing(routing string) *Delete { return r } -// Timeout Period to wait for active shards. +// Timeout The period to wait for active shards. +// +// This parameter is useful for situations where the primary shard assigned to +// perform the delete operation might not be available when the delete operation +// runs. +// Some reasons for this might be that the primary shard is currently recovering +// from a store or undergoing relocation. +// By default, the delete operation will wait on the primary shard to become +// available for up to 1 minute before failing and responding with an error. // API name: timeout func (r *Delete) Timeout(duration string) *Delete { r.values.Set("timeout", duration) @@ -401,9 +512,8 @@ func (r *Delete) Timeout(duration string) *Delete { return r } -// Version Explicit version number for concurrency control. 
-// The specified version must match the current version of the document for the -// request to succeed. +// Version An explicit version number for concurrency control. +// It must match the current version of the document for the request to succeed. // API name: version func (r *Delete) Version(versionnumber string) *Delete { r.values.Set("version", versionnumber) @@ -411,7 +521,7 @@ func (r *Delete) Version(versionnumber string) *Delete { return r } -// VersionType Specific version type: `external`, `external_gte`. +// VersionType The version type. // API name: version_type func (r *Delete) VersionType(versiontype versiontype.VersionType) *Delete { r.values.Set("version_type", versiontype.String()) @@ -419,10 +529,11 @@ func (r *Delete) VersionType(versiontype versiontype.VersionType) *Delete { return r } -// WaitForActiveShards The number of shard copies that must be active before proceeding with the -// operation. -// Set to `all` or any positive integer up to the total number of shards in the -// index (`number_of_replicas+1`). +// WaitForActiveShards The minimum number of shard copies that must be active before proceeding with +// the operation. +// You can set it to `all` or any positive integer up to the total number of +// shards in the index (`number_of_replicas+1`). +// The default value of `1` means it waits for each primary shard to be active. // API name: wait_for_active_shards func (r *Delete) WaitForActiveShards(waitforactiveshards string) *Delete { r.values.Set("wait_for_active_shards", waitforactiveshards) diff --git a/typedapi/core/delete/response.go b/typedapi/core/delete/response.go index 7408260a50..49e9694fdf 100644 --- a/typedapi/core/delete/response.go +++ b/typedapi/core/delete/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package delete @@ -27,16 +27,25 @@ import ( // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/delete/DeleteResponse.ts#L22-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/delete/DeleteResponse.ts#L22-L34 type Response struct { - ForcedRefresh *bool `json:"forced_refresh,omitempty"` - Id_ string `json:"_id"` - Index_ string `json:"_index"` - PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - Result result.Result `json:"result"` - SeqNo_ *int64 `json:"_seq_no,omitempty"` - Shards_ types.ShardStatistics `json:"_shards"` - Version_ int64 `json:"_version"` + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + // Id_ The unique identifier for the added document. + Id_ string `json:"_id"` + // Index_ The name of the index the document was added to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result The result of the indexing operation: `created` or `updated`. + Result result.Result `json:"result"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Information about the replication process of the operation. + Shards_ types.ShardStatistics `json:"_shards"` + // Version_ The document version, which is incremented each time the document is updated. 
+ Version_ int64 `json:"_version"` } // NewResponse returns a Response diff --git a/typedapi/core/deletebyquery/delete_by_query.go b/typedapi/core/deletebyquery/delete_by_query.go index 7f40dae969..48b131d421 100644 --- a/typedapi/core/deletebyquery/delete_by_query.go +++ b/typedapi/core/deletebyquery/delete_by_query.go @@ -16,10 +16,135 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete documents. +// // Deletes documents that match the specified query. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or alias: +// +// * `read` +// * `delete` or `write` +// +// You can specify the query criteria in the request URI or the request body +// using the same syntax as the search API. +// When you submit a delete by query request, Elasticsearch gets a snapshot of +// the data stream or index when it begins processing the request and deletes +// matching documents using internal versioning. +// If a document changes between the time that the snapshot is taken and the +// delete operation is processed, it results in a version conflict and the +// delete operation fails. +// +// NOTE: Documents with a version equal to 0 cannot be deleted using delete by +// query because internal versioning does not support 0 as a valid version +// number. +// +// While processing a delete by query request, Elasticsearch performs multiple +// search requests sequentially to find all of the matching documents to delete. +// A bulk delete request is performed for each batch of matching documents. +// If a search or bulk request is rejected, the requests are retried up to 10 +// times, with exponential back off. 
+// If the maximum retry limit is reached, processing halts and all failed +// requests are returned in the response. +// Any delete requests that completed successfully still stick, they are not +// rolled back. +// +// You can opt to count version conflicts instead of halting and returning by +// setting `conflicts` to `proceed`. +// Note that if you opt to count version conflicts the operation could attempt +// to delete more documents from the source than `max_docs` until it has +// successfully deleted `max_docs documents`, or it has gone through every +// document in the source query. +// +// **Throttling delete requests** +// +// To control the rate at which delete by query issues batches of delete +// operations, you can set `requests_per_second` to any positive decimal number. +// This pads each batch with a wait time to throttle the rate. +// Set `requests_per_second` to `-1` to disable throttling. +// +// Throttling uses a wait time between batches so that the internal scroll +// requests can be given a timeout that takes the request padding into account. +// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. +// By default the batch size is `1000`, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single `_bulk` request, large batch sizes +// cause Elasticsearch to create many requests and wait before starting the next +// set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Delete by query supports sliced scroll to parallelize the delete process. +// This can improve efficiency and provide a convenient way to break the request +// down into smaller parts. +// +// Setting `slices` to `auto` lets Elasticsearch choose the number of slices to +// use. 
+// This setting will use one slice per shard, up to a certain limit. +// If there are multiple source data streams or indices, it will choose the +// number of slices based on the index or backing index with the smallest number +// of shards. +// Adding slices to the delete by query operation creates sub-requests which +// means it has some quirks: +// +// * You can see these requests in the tasks APIs. These sub-requests are +// "child" tasks of the task for the request with slices. +// * Fetching the status of the task for the request with slices only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with `slices` will cancel each sub-request. +// * Due to the nature of `slices` each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// `slices` are distributed proportionally to each sub-request. Combine that +// with the earlier point about distribution being uneven and you should +// conclude that using `max_docs` with `slices` might not result in exactly +// `max_docs` documents being deleted. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. +// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many `slices` hurts +// performance. 
Setting `slices` higher than the number of shards generally does +// not improve efficiency and adds overhead. +// * Delete performance scales linearly across available resources with the +// number of slices. +// +// Whether query or delete performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Cancel a delete by query operation** +// +// Any delete by query can be canceled using the task cancel API. For example: +// +// ``` +// POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel +// ``` +// +// The task ID can be found by using the get tasks API. +// +// Cancellation should happen quickly but might take a few seconds. +// The get task status API will continue to list the delete by query task until +// this task checks that it has been cancelled and terminates itself. package deletebyquery import ( @@ -87,8 +212,133 @@ func NewDeleteByQueryFunc(tp elastictransport.Interface) NewDeleteByQuery { } // Delete documents. +// // Deletes documents that match the specified query. // +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or alias: +// +// * `read` +// * `delete` or `write` +// +// You can specify the query criteria in the request URI or the request body +// using the same syntax as the search API. +// When you submit a delete by query request, Elasticsearch gets a snapshot of +// the data stream or index when it begins processing the request and deletes +// matching documents using internal versioning. +// If a document changes between the time that the snapshot is taken and the +// delete operation is processed, it results in a version conflict and the +// delete operation fails. +// +// NOTE: Documents with a version equal to 0 cannot be deleted using delete by +// query because internal versioning does not support 0 as a valid version +// number. 
+//
+// While processing a delete by query request, Elasticsearch performs multiple
+// search requests sequentially to find all of the matching documents to delete.
+// A bulk delete request is performed for each batch of matching documents.
+// If a search or bulk request is rejected, the requests are retried up to 10
+// times, with exponential back off.
+// If the maximum retry limit is reached, processing halts and all failed
+// requests are returned in the response.
+// Any delete requests that completed successfully still stick, they are not
+// rolled back.
+//
+// You can opt to count version conflicts instead of halting and returning by
+// setting `conflicts` to `proceed`.
+// Note that if you opt to count version conflicts the operation could attempt
+// to delete more documents from the source than `max_docs` until it has
+// successfully deleted `max_docs` documents, or it has gone through every
+// document in the source query.
+//
+// **Throttling delete requests**
+//
+// To control the rate at which delete by query issues batches of delete
+// operations, you can set `requests_per_second` to any positive decimal number.
+// This pads each batch with a wait time to throttle the rate.
+// Set `requests_per_second` to `-1` to disable throttling.
+//
+// Throttling uses a wait time between batches so that the internal scroll
+// requests can be given a timeout that takes the request padding into account.
+// The padding time is the difference between the batch size divided by the
+// `requests_per_second` and the time spent writing.
+// By default the batch size is `1000`, so if `requests_per_second` is set to
+// `500`:
+//
+// ```
+// target_time = 1000 / 500 per second = 2 seconds
+// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+// ```
+//
+// Since the batch is issued as a single `_bulk` request, large batch sizes
+// cause Elasticsearch to create many requests and wait before starting the next
+// set.
+// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Delete by query supports sliced scroll to parallelize the delete process. +// This can improve efficiency and provide a convenient way to break the request +// down into smaller parts. +// +// Setting `slices` to `auto` lets Elasticsearch choose the number of slices to +// use. +// This setting will use one slice per shard, up to a certain limit. +// If there are multiple source data streams or indices, it will choose the +// number of slices based on the index or backing index with the smallest number +// of shards. +// Adding slices to the delete by query operation creates sub-requests which +// means it has some quirks: +// +// * You can see these requests in the tasks APIs. These sub-requests are +// "child" tasks of the task for the request with slices. +// * Fetching the status of the task for the request with slices only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with `slices` will cancel each sub-request. +// * Due to the nature of `slices` each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// `slices` are distributed proportionally to each sub-request. Combine that +// with the earlier point about distribution being uneven and you should +// conclude that using `max_docs` with `slices` might not result in exactly +// `max_docs` documents being deleted. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. 
+// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many `slices` hurts +// performance. Setting `slices` higher than the number of shards generally does +// not improve efficiency and adds overhead. +// * Delete performance scales linearly across available resources with the +// number of slices. +// +// Whether query or delete performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Cancel a delete by query operation** +// +// Any delete by query can be canceled using the task cancel API. For example: +// +// ``` +// POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel +// ``` +// +// The task ID can be found by using the get tasks API. +// +// Cancellation should happen quickly but might take a few seconds. +// The get task status API will continue to list the delete by query task until +// this task checks that it has been cancelled and terminates itself. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html func New(tp elastictransport.Interface) *DeleteByQuery { r := &DeleteByQuery{ @@ -97,8 +347,6 @@ func New(tp elastictransport.Interface) *DeleteByQuery { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -310,8 +558,8 @@ func (r *DeleteByQuery) Header(key, value string) *DeleteByQuery { return r } -// Index Comma-separated list of data streams, indices, and aliases to search. -// Supports wildcards (`*`). +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). 
// To search all data streams or indices, omit this parameter or use `*` or // `_all`. // API Name: index @@ -335,6 +583,8 @@ func (r *DeleteByQuery) AllowNoIndices(allownoindices bool) *DeleteByQuery { } // Analyzer Analyzer to use for the query string. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: analyzer func (r *DeleteByQuery) Analyzer(analyzer string) *DeleteByQuery { r.values.Set("analyzer", analyzer) @@ -343,6 +593,8 @@ func (r *DeleteByQuery) Analyzer(analyzer string) *DeleteByQuery { } // AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: analyze_wildcard func (r *DeleteByQuery) AnalyzeWildcard(analyzewildcard bool) *DeleteByQuery { r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) @@ -359,6 +611,8 @@ func (r *DeleteByQuery) Conflicts(conflicts conflicts.Conflicts) *DeleteByQuery } // DefaultOperator The default operator for query string query: `AND` or `OR`. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: default_operator func (r *DeleteByQuery) DefaultOperator(defaultoperator operator.Operator) *DeleteByQuery { r.values.Set("default_operator", defaultoperator.String()) @@ -366,7 +620,10 @@ func (r *DeleteByQuery) DefaultOperator(defaultoperator operator.Operator) *Dele return r } -// Df Field to use as default where no field prefix is given in the query string. +// Df The field to use as default where no field prefix is given in the query +// string. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: df func (r *DeleteByQuery) Df(df string) *DeleteByQuery { r.values.Set("df", df) @@ -374,11 +631,10 @@ func (r *DeleteByQuery) Df(df string) *DeleteByQuery { return r } -// ExpandWildcards Type of index that wildcard patterns can match. 
+// ExpandWildcards The type of index that wildcard patterns can match. // If the request can target data streams, this argument determines whether // wildcard expressions match hidden data streams. -// Supports comma-separated values, such as `open,hidden`. Valid values are: -// `all`, `open`, `closed`, `hidden`, `none`. +// It supports comma-separated values, such as `open,hidden`. // API name: expand_wildcards func (r *DeleteByQuery) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *DeleteByQuery { tmp := []string{} @@ -409,6 +665,8 @@ func (r *DeleteByQuery) IgnoreUnavailable(ignoreunavailable bool) *DeleteByQuery // Lenient If `true`, format-based query failures (such as providing text to a numeric // field) in the query string will be ignored. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: lenient func (r *DeleteByQuery) Lenient(lenient bool) *DeleteByQuery { r.values.Set("lenient", strconv.FormatBool(lenient)) @@ -416,8 +674,8 @@ func (r *DeleteByQuery) Lenient(lenient bool) *DeleteByQuery { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// It is random by default. // API name: preference func (r *DeleteByQuery) Preference(preference string) *DeleteByQuery { r.values.Set("preference", preference) @@ -427,6 +685,9 @@ func (r *DeleteByQuery) Preference(preference string) *DeleteByQuery { // Refresh If `true`, Elasticsearch refreshes all shards involved in the delete by query // after the request completes. +// This is different than the delete API's `refresh` parameter, which causes +// just the shard that received the delete request to be refreshed. +// Unlike the delete API, it does not support `wait_for`. 
// API name: refresh func (r *DeleteByQuery) Refresh(refresh bool) *DeleteByQuery { r.values.Set("refresh", strconv.FormatBool(refresh)) @@ -451,7 +712,7 @@ func (r *DeleteByQuery) RequestsPerSecond(requestspersecond string) *DeleteByQue return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *DeleteByQuery) Routing(routing string) *DeleteByQuery { r.values.Set("routing", routing) @@ -459,7 +720,7 @@ func (r *DeleteByQuery) Routing(routing string) *DeleteByQuery { return r } -// Q Query in the Lucene query string syntax. +// Q A query in the Lucene query string syntax. // API name: q func (r *DeleteByQuery) Q(q string) *DeleteByQuery { r.values.Set("q", q) @@ -467,7 +728,7 @@ func (r *DeleteByQuery) Q(q string) *DeleteByQuery { return r } -// Scroll Period to retain the search context for scrolling. +// Scroll The period to retain the search context for scrolling. // API name: scroll func (r *DeleteByQuery) Scroll(duration string) *DeleteByQuery { r.values.Set("scroll", duration) @@ -475,7 +736,7 @@ func (r *DeleteByQuery) Scroll(duration string) *DeleteByQuery { return r } -// ScrollSize Size of the scroll request that powers the operation. +// ScrollSize The size of the scroll request that powers the operation. // API name: scroll_size func (r *DeleteByQuery) ScrollSize(scrollsize string) *DeleteByQuery { r.values.Set("scroll_size", scrollsize) @@ -483,8 +744,8 @@ func (r *DeleteByQuery) ScrollSize(scrollsize string) *DeleteByQuery { return r } -// SearchTimeout Explicit timeout for each search request. -// Defaults to no timeout. +// SearchTimeout The explicit timeout for each search request. +// It defaults to no timeout. 
// API name: search_timeout func (r *DeleteByQuery) SearchTimeout(duration string) *DeleteByQuery { r.values.Set("search_timeout", duration) @@ -493,7 +754,7 @@ func (r *DeleteByQuery) SearchTimeout(duration string) *DeleteByQuery { } // SearchType The type of the search operation. -// Available options: `query_then_fetch`, `dfs_query_then_fetch`. +// Available options include `query_then_fetch` and `dfs_query_then_fetch`. // API name: search_type func (r *DeleteByQuery) SearchType(searchtype searchtype.SearchType) *DeleteByQuery { r.values.Set("search_type", searchtype.String()) @@ -509,7 +770,7 @@ func (r *DeleteByQuery) Slices(slices string) *DeleteByQuery { return r } -// Sort A comma-separated list of : pairs. +// Sort A comma-separated list of `:` pairs. // API name: sort func (r *DeleteByQuery) Sort(sorts ...string) *DeleteByQuery { tmp := []string{} @@ -521,7 +782,7 @@ func (r *DeleteByQuery) Sort(sorts ...string) *DeleteByQuery { return r } -// Stats Specific `tag` of the request for logging and statistical purposes. +// Stats The specific `tag` of the request for logging and statistical purposes. // API name: stats func (r *DeleteByQuery) Stats(stats ...string) *DeleteByQuery { tmp := []string{} @@ -533,9 +794,10 @@ func (r *DeleteByQuery) Stats(stats ...string) *DeleteByQuery { return r } -// TerminateAfter Maximum number of documents to collect for each shard. +// TerminateAfter The maximum number of documents to collect for each shard. // If a query reaches this limit, Elasticsearch terminates the query early. // Elasticsearch collects documents before sorting. +// // Use with caution. // Elasticsearch applies this parameter to each shard handling the request. // When possible, let Elasticsearch perform early termination automatically. @@ -548,7 +810,7 @@ func (r *DeleteByQuery) TerminateAfter(terminateafter string) *DeleteByQuery { return r } -// Timeout Period each deletion request waits for active shards. 
+// Timeout The period each deletion request waits for active shards. // API name: timeout func (r *DeleteByQuery) Timeout(duration string) *DeleteByQuery { r.values.Set("timeout", duration) @@ -566,8 +828,10 @@ func (r *DeleteByQuery) Version(version bool) *DeleteByQuery { // WaitForActiveShards The number of shard copies that must be active before proceeding with the // operation. -// Set to all or any positive integer up to the total number of shards in the +// Set to `all` or any positive integer up to the total number of shards in the // index (`number_of_replicas+1`). +// The `timeout` value controls how long each write request waits for +// unavailable shards to become available. // API name: wait_for_active_shards func (r *DeleteByQuery) WaitForActiveShards(waitforactiveshards string) *DeleteByQuery { r.values.Set("wait_for_active_shards", waitforactiveshards) @@ -576,6 +840,11 @@ func (r *DeleteByQuery) WaitForActiveShards(waitforactiveshards string) *DeleteB } // WaitForCompletion If `true`, the request blocks until the operation is complete. +// If `false`, Elasticsearch performs some preflight checks, launches the +// request, and returns a task you can use to cancel or get the status of the +// task. Elasticsearch creates a record of this task as a document at +// `.tasks/task/${taskId}`. When you are done with a task, you should delete the +// task document so Elasticsearch can reclaim the space. // API name: wait_for_completion func (r *DeleteByQuery) WaitForCompletion(waitforcompletion bool) *DeleteByQuery { r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) @@ -627,30 +896,42 @@ func (r *DeleteByQuery) Pretty(pretty bool) *DeleteByQuery { return r } -// MaxDocs The maximum number of documents to delete. +// The maximum number of documents to delete. 
// API name: max_docs func (r *DeleteByQuery) MaxDocs(maxdocs int64) *DeleteByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxDocs = &maxdocs return r } -// Query Specifies the documents to delete using the Query DSL. +// The documents to delete specified with Query DSL. // API name: query -func (r *DeleteByQuery) Query(query *types.Query) *DeleteByQuery { +func (r *DeleteByQuery) Query(query types.QueryVariant) *DeleteByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// Slice Slice the request manually using the provided slice ID and total number of +// Slice the request manually using the provided slice ID and total number of // slices. // API name: slice -func (r *DeleteByQuery) Slice(slice *types.SlicedScroll) *DeleteByQuery { +func (r *DeleteByQuery) Slice(slice types.SlicedScrollVariant) *DeleteByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Slice = slice + r.req.Slice = slice.SlicedScrollCaster() return r } diff --git a/typedapi/core/deletebyquery/request.go b/typedapi/core/deletebyquery/request.go index 2edfa30f7c..93973384ce 100644 --- a/typedapi/core/deletebyquery/request.go +++ b/typedapi/core/deletebyquery/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletebyquery @@ -29,12 +29,12 @@ import ( // Request holds the request body struct for the package deletebyquery // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/delete_by_query/DeleteByQueryRequest.ts#L36-L210 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/delete_by_query/DeleteByQueryRequest.ts#L36-L310 type Request struct { // MaxDocs The maximum number of documents to delete. MaxDocs *int64 `json:"max_docs,omitempty"` - // Query Specifies the documents to delete using the Query DSL. + // Query The documents to delete specified with Query DSL. Query *types.Query `json:"query,omitempty"` // Slice Slice the request manually using the provided slice ID and total number of // slices. diff --git a/typedapi/core/deletebyquery/response.go b/typedapi/core/deletebyquery/response.go index e794ec5612..8f40aa6a0b 100644 --- a/typedapi/core/deletebyquery/response.go +++ b/typedapi/core/deletebyquery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletebyquery @@ -26,24 +26,53 @@ import ( // Response holds the response body struct for the package deletebyquery // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/delete_by_query/DeleteByQueryResponse.ts#L26-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/delete_by_query/DeleteByQueryResponse.ts#L26-L88 type Response struct { - Batches *int64 `json:"batches,omitempty"` - Deleted *int64 `json:"deleted,omitempty"` - Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` - Noops *int64 `json:"noops,omitempty"` - RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` - Retries *types.Retries `json:"retries,omitempty"` - SliceId *int `json:"slice_id,omitempty"` - Task types.TaskId `json:"task,omitempty"` - Throttled types.Duration `json:"throttled,omitempty"` - ThrottledMillis *int64 `json:"throttled_millis,omitempty"` - ThrottledUntil types.Duration `json:"throttled_until,omitempty"` - ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` - TimedOut *bool `json:"timed_out,omitempty"` - Took *int64 `json:"took,omitempty"` - Total *int64 `json:"total,omitempty"` - VersionConflicts *int64 `json:"version_conflicts,omitempty"` + + // Batches The number of scroll responses pulled back by the delete by query. + Batches *int64 `json:"batches,omitempty"` + // Deleted The number of documents that were successfully deleted. + Deleted *int64 `json:"deleted,omitempty"` + // Failures An array of failures if there were any unrecoverable errors during the + // process. + // If this array is not empty, the request ended abnormally because of those + // failures. 
+ // Delete by query is implemented using batches and any failures cause the + // entire process to end but all failures in the current batch are collected + // into the array. + // You can use the `conflicts` option to prevent reindex from ending on version + // conflicts. + Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` + // Noops This field is always equal to zero for delete by query. + // It exists only so that delete by query, update by query, and reindex APIs + // return responses with the same structure. + Noops *int64 `json:"noops,omitempty"` + // RequestsPerSecond The number of requests per second effectively run during the delete by query. + RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` + // Retries The number of retries attempted by delete by query. + // `bulk` is the number of bulk actions retried. + // `search` is the number of search actions retried. + Retries *types.Retries `json:"retries,omitempty"` + SliceId *int `json:"slice_id,omitempty"` + Task types.TaskId `json:"task,omitempty"` + Throttled types.Duration `json:"throttled,omitempty"` + // ThrottledMillis The number of milliseconds the request slept to conform to + // `requests_per_second`. + ThrottledMillis *int64 `json:"throttled_millis,omitempty"` + ThrottledUntil types.Duration `json:"throttled_until,omitempty"` + // ThrottledUntilMillis This field should always be equal to zero in a `_delete_by_query` response. + // It has meaning only when using the task API, where it indicates the next time + // (in milliseconds since epoch) a throttled request will be run again in order + // to conform to `requests_per_second`. + ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` + // TimedOut If `true`, some requests run during the delete by query operation timed out. + TimedOut *bool `json:"timed_out,omitempty"` + // Took The number of milliseconds from start to end of the whole operation. 
+	Took *int64 `json:"took,omitempty"`
+	// Total The number of documents that were successfully processed.
+	Total *int64 `json:"total,omitempty"`
+	// VersionConflicts The number of version conflicts that the delete by query hit.
+	VersionConflicts *int64 `json:"version_conflicts,omitempty"`
 }
 
 // NewResponse returns a Response
diff --git a/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go b/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go
index 13b3773781..75561a70f5 100644
--- a/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go
+++ b/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go
@@ -16,10 +16,15 @@
 // under the License.
 
 // Code generated from the elasticsearch-specification DO NOT EDIT.
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d
 
-// Changes the number of requests per second for a particular Delete By Query
+// Throttle a delete by query operation.
+//
+// Change the number of requests per second for a particular delete by query
 // operation.
+// Rethrottling that speeds up the query takes effect immediately but
+// rethrottling that slows down the query takes effect after completing the
+// current batch to prevent scroll timeouts.
 package deletebyqueryrethrottle
 
 import (
@@ -77,10 +82,15 @@ func NewDeleteByQueryRethrottleFunc(tp elastictransport.Interface) NewDeleteByQu
 	}
 }
 
-// Changes the number of requests per second for a particular Delete By Query
+// Throttle a delete by query operation.
+//
+// Change the number of requests per second for a particular delete by query
 // operation.
+// Rethrottling that speeds up the query takes effect immediately but
+// rethrottling that slows down the query takes effect after completing the
+// current batch to prevent scroll timeouts.
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html#docs-delete-by-query-rethrottle func New(tp elastictransport.Interface) *DeleteByQueryRethrottle { r := &DeleteByQueryRethrottle{ transport: tp, @@ -302,6 +312,7 @@ func (r *DeleteByQueryRethrottle) _taskid(taskid string) *DeleteByQueryRethrottl } // RequestsPerSecond The throttle for this request in sub-requests per second. +// To disable throttling, set it to `-1`. // API name: requests_per_second func (r *DeleteByQueryRethrottle) RequestsPerSecond(requestspersecond string) *DeleteByQueryRethrottle { r.values.Set("requests_per_second", requestspersecond) diff --git a/typedapi/core/deletebyqueryrethrottle/response.go b/typedapi/core/deletebyqueryrethrottle/response.go index 9db20cef9f..aa75087cd1 100644 --- a/typedapi/core/deletebyqueryrethrottle/response.go +++ b/typedapi/core/deletebyqueryrethrottle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletebyqueryrethrottle @@ -32,7 +32,7 @@ import ( // Response holds the response body struct for the package deletebyqueryrethrottle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/delete_by_query_rethrottle/DeleteByQueryRethrottleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/delete_by_query_rethrottle/DeleteByQueryRethrottleResponse.ts#L22-L24 type Response struct { NodeFailures []types.ErrorCause `json:"node_failures,omitempty"` // Nodes Task information grouped by node, if `group_by` was set to `node` (the diff --git a/typedapi/core/deletescript/delete_script.go b/typedapi/core/deletescript/delete_script.go index 319ffa554d..5d03685910 100644 --- a/typedapi/core/deletescript/delete_script.go +++ b/typedapi/core/deletescript/delete_script.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete a script or search template. // Deletes a stored script or search template. @@ -80,7 +80,7 @@ func NewDeleteScriptFunc(tp elastictransport.Interface) NewDeleteScript { // Delete a script or search template. // Deletes a stored script or search template. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-stored-script-api.html func New(tp elastictransport.Interface) *DeleteScript { r := &DeleteScript{ transport: tp, @@ -290,7 +290,7 @@ func (r *DeleteScript) Header(key, value string) *DeleteScript { return r } -// Id Identifier for the stored script or search template. +// Id The identifier for the stored script or search template. // API Name: id func (r *DeleteScript) _id(id string) *DeleteScript { r.paramSet |= idMask @@ -299,9 +299,10 @@ func (r *DeleteScript) _id(id string) *DeleteScript { return r } -// MasterTimeout Period to wait for a connection to the master node. +// MasterTimeout The period to wait for a connection to the master node. // If no response is received before the timeout expires, the request fails and // returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. // API name: master_timeout func (r *DeleteScript) MasterTimeout(duration string) *DeleteScript { r.values.Set("master_timeout", duration) @@ -309,9 +310,10 @@ func (r *DeleteScript) MasterTimeout(duration string) *DeleteScript { return r } -// Timeout Period to wait for a response. +// Timeout The period to wait for a response. // If no response is received before the timeout expires, the request fails and // returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. // API name: timeout func (r *DeleteScript) Timeout(duration string) *DeleteScript { r.values.Set("timeout", duration) diff --git a/typedapi/core/deletescript/response.go b/typedapi/core/deletescript/response.go index f53c0962b8..43e6bea025 100644 --- a/typedapi/core/deletescript/response.go +++ b/typedapi/core/deletescript/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletescript // Response holds the response body struct for the package deletescript // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/delete_script/DeleteScriptResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/delete_script/DeleteScriptResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/core/exists/exists.go b/typedapi/core/exists/exists.go index 131eff90ac..b0ccbe0932 100644 --- a/typedapi/core/exists/exists.go +++ b/typedapi/core/exists/exists.go @@ -16,10 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Check a document. -// Checks if a specified document exists. +// +// Verify that a document exists. +// For example, check to see if a document with the `_id` 0 exists: +// +// ``` +// HEAD my-index-000001/_doc/0 +// ``` +// +// If the document exists, the API returns a status code of `200 - OK`. +// If the document doesn’t exist, the API returns `404 - Not Found`. +// +// **Versioning support** +// +// You can use the `version` parameter to check the document only if its current +// version is equal to the specified one. +// +// Internally, Elasticsearch has marked the old document as deleted and added an +// entirely new document. 
+// The old version of the document doesn't disappear immediately, although you +// won't be able to access it. +// Elasticsearch cleans up deleted documents in the background as you continue +// to index more data. package exists import ( @@ -82,7 +103,28 @@ func NewExistsFunc(tp elastictransport.Interface) NewExists { } // Check a document. -// Checks if a specified document exists. +// +// Verify that a document exists. +// For example, check to see if a document with the `_id` 0 exists: +// +// ``` +// HEAD my-index-000001/_doc/0 +// ``` +// +// If the document exists, the API returns a status code of `200 - OK`. +// If the document doesn’t exist, the API returns `404 - Not Found`. +// +// **Versioning support** +// +// You can use the `version` parameter to check the document only if its current +// version is equal to the specified one. +// +// Internally, Elasticsearch has marked the old document as deleted and added an +// entirely new document. +// The old version of the document doesn't disappear immediately, although you +// won't be able to access it. +// Elasticsearch cleans up deleted documents in the background as you continue +// to index more data. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html func New(tp elastictransport.Interface) *Exists { @@ -251,7 +293,7 @@ func (r *Exists) Header(key, value string) *Exists { return r } -// Id Identifier of the document. +// Id A unique document identifier. // API Name: id func (r *Exists) _id(id string) *Exists { r.paramSet |= idMask @@ -260,8 +302,8 @@ func (r *Exists) _id(id string) *Exists { return r } -// Index Comma-separated list of data streams, indices, and aliases. -// Supports wildcards (`*`). +// Index A comma-separated list of data streams, indices, and aliases. +// It supports wildcards (`*`). 
// API Name: index func (r *Exists) _index(index string) *Exists { r.paramSet |= indexMask @@ -270,8 +312,16 @@ func (r *Exists) _index(index string) *Exists { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// By default, the operation is randomized between the shard replicas. +// +// If it is set to `_local`, the operation will prefer to be run on a local +// allocated shard when possible. +// If it is set to a custom value, the value is used to guarantee that the same +// shards will be used for the same custom value. +// This can help with "jumping values" when hitting different shards in +// different refresh states. +// A sample value can be something like the web session ID or the user name. // API name: preference func (r *Exists) Preference(preference string) *Exists { r.values.Set("preference", preference) @@ -287,8 +337,10 @@ func (r *Exists) Realtime(realtime bool) *Exists { return r } -// Refresh If `true`, Elasticsearch refreshes all shards involved in the delete by query -// after the request completes. +// Refresh If `true`, the request refreshes the relevant shards before retrieving the +// document. +// Setting it to `true` should be done after careful thought and verification +// that this does not cause a heavy load on the system (and slow down indexing). // API name: refresh func (r *Exists) Refresh(refresh bool) *Exists { r.values.Set("refresh", strconv.FormatBool(refresh)) @@ -296,7 +348,7 @@ func (r *Exists) Refresh(refresh bool) *Exists { return r } -// Routing Target the specified primary shard. +// Routing A custom value used to route operations to a specific shard. 
// API name: routing func (r *Exists) Routing(routing string) *Exists { r.values.Set("routing", routing) @@ -304,8 +356,8 @@ func (r *Exists) Routing(routing string) *Exists { return r } -// Source_ `true` or `false` to return the `_source` field or not, or a list of fields -// to return. +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or lists +// the fields to return. // API name: _source func (r *Exists) Source_(sourceconfigparam string) *Exists { r.values.Set("_source", sourceconfigparam) @@ -313,7 +365,10 @@ func (r *Exists) Source_(sourceconfigparam string) *Exists { return r } -// SourceExcludes_ A comma-separated list of source fields to exclude in the response. +// SourceExcludes_ A comma-separated list of source fields to exclude from the response. +// You can also use this parameter to exclude fields from the subset specified +// in `_source_includes` query parameter. +// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_excludes func (r *Exists) SourceExcludes_(fields ...string) *Exists { r.values.Set("_source_excludes", strings.Join(fields, ",")) @@ -322,6 +377,10 @@ func (r *Exists) SourceExcludes_(fields ...string) *Exists { } // SourceIncludes_ A comma-separated list of source fields to include in the response. +// If this parameter is specified, only these source fields are returned. +// You can exclude fields from this subset using the `_source_excludes` query +// parameter. +// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_includes func (r *Exists) SourceIncludes_(fields ...string) *Exists { r.values.Set("_source_includes", strings.Join(fields, ",")) @@ -329,9 +388,9 @@ func (r *Exists) SourceIncludes_(fields ...string) *Exists { return r } -// StoredFields List of stored fields to return as part of a hit. +// StoredFields A comma-separated list of stored fields to return as part of a hit. 
// If no fields are specified, no stored fields are included in the response. -// If this field is specified, the `_source` parameter defaults to false. +// If this field is specified, the `_source` parameter defaults to `false`. // API name: stored_fields func (r *Exists) StoredFields(fields ...string) *Exists { r.values.Set("stored_fields", strings.Join(fields, ",")) @@ -349,7 +408,7 @@ func (r *Exists) Version(versionnumber string) *Exists { return r } -// VersionType Specific version type: `external`, `external_gte`. +// VersionType The version type. // API name: version_type func (r *Exists) VersionType(versiontype versiontype.VersionType) *Exists { r.values.Set("version_type", versiontype.String()) diff --git a/typedapi/core/existssource/exists_source.go b/typedapi/core/existssource/exists_source.go index d6d7ee0372..3eb95f26c9 100644 --- a/typedapi/core/existssource/exists_source.go +++ b/typedapi/core/existssource/exists_source.go @@ -16,10 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Check for a document source. -// Checks if a document's `_source` is stored. +// +// Check whether a document source exists in an index. +// For example: +// +// ``` +// HEAD my-index-000001/_source/1 +// ``` +// +// A document's source is not available if it is disabled in the mapping. package existssource import ( @@ -82,7 +90,15 @@ func NewExistsSourceFunc(tp elastictransport.Interface) NewExistsSource { } // Check for a document source. -// Checks if a document's `_source` is stored. +// +// Check whether a document source exists in an index. +// For example: +// +// ``` +// HEAD my-index-000001/_source/1 +// ``` +// +// A document's source is not available if it is disabled in the mapping. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html func New(tp elastictransport.Interface) *ExistsSource { @@ -251,7 +267,7 @@ func (r *ExistsSource) Header(key, value string) *ExistsSource { return r } -// Id Identifier of the document. +// Id A unique identifier for the document. // API Name: id func (r *ExistsSource) _id(id string) *ExistsSource { r.paramSet |= idMask @@ -260,8 +276,8 @@ func (r *ExistsSource) _id(id string) *ExistsSource { return r } -// Index Comma-separated list of data streams, indices, and aliases. -// Supports wildcards (`*`). +// Index A comma-separated list of data streams, indices, and aliases. +// It supports wildcards (`*`). // API Name: index func (r *ExistsSource) _index(index string) *ExistsSource { r.paramSet |= indexMask @@ -270,8 +286,8 @@ func (r *ExistsSource) _index(index string) *ExistsSource { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// By default, the operation is randomized between the shard replicas. // API name: preference func (r *ExistsSource) Preference(preference string) *ExistsSource { r.values.Set("preference", preference) @@ -279,7 +295,7 @@ func (r *ExistsSource) Preference(preference string) *ExistsSource { return r } -// Realtime If true, the request is real-time as opposed to near-real-time. +// Realtime If `true`, the request is real-time as opposed to near-real-time. // API name: realtime func (r *ExistsSource) Realtime(realtime bool) *ExistsSource { r.values.Set("realtime", strconv.FormatBool(realtime)) @@ -287,8 +303,10 @@ func (r *ExistsSource) Realtime(realtime bool) *ExistsSource { return r } -// Refresh If `true`, Elasticsearch refreshes all shards involved in the delete by query -// after the request completes. +// Refresh If `true`, the request refreshes the relevant shards before retrieving the +// document. 
+// Setting it to `true` should be done after careful thought and verification +// that this does not cause a heavy load on the system (and slow down indexing). // API name: refresh func (r *ExistsSource) Refresh(refresh bool) *ExistsSource { r.values.Set("refresh", strconv.FormatBool(refresh)) @@ -296,7 +314,7 @@ func (r *ExistsSource) Refresh(refresh bool) *ExistsSource { return r } -// Routing Target the specified primary shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *ExistsSource) Routing(routing string) *ExistsSource { r.values.Set("routing", routing) @@ -304,8 +322,8 @@ func (r *ExistsSource) Routing(routing string) *ExistsSource { return r } -// Source_ `true` or `false` to return the `_source` field or not, or a list of fields -// to return. +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or lists +// the fields to return. // API name: _source func (r *ExistsSource) Source_(sourceconfigparam string) *ExistsSource { r.values.Set("_source", sourceconfigparam) @@ -329,9 +347,8 @@ func (r *ExistsSource) SourceIncludes_(fields ...string) *ExistsSource { return r } -// Version Explicit version number for concurrency control. -// The specified version must match the current version of the document for the -// request to succeed. +// Version The version number for concurrency control. +// It must match the current version of the document for the request to succeed. // API name: version func (r *ExistsSource) Version(versionnumber string) *ExistsSource { r.values.Set("version", versionnumber) @@ -339,7 +356,7 @@ func (r *ExistsSource) Version(versionnumber string) *ExistsSource { return r } -// VersionType Specific version type: `external`, `external_gte`. +// VersionType The version type. 
// API name: version_type func (r *ExistsSource) VersionType(versiontype versiontype.VersionType) *ExistsSource { r.values.Set("version_type", versiontype.String()) diff --git a/typedapi/core/explain/explain.go b/typedapi/core/explain/explain.go index b93bc89b32..4e4726649d 100644 --- a/typedapi/core/explain/explain.go +++ b/typedapi/core/explain/explain.go @@ -16,11 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Explain a document match result. -// Returns information about why a specific document matches, or doesn’t match, -// a query. +// Get information about why a specific document matches, or doesn't match, a +// query. +// It computes a score explanation for a query and a specific document. package explain import ( @@ -90,8 +91,9 @@ func NewExplainFunc(tp elastictransport.Interface) NewExplain { } // Explain a document match result. -// Returns information about why a specific document matches, or doesn’t match, -// a query. +// Get information about why a specific document matches, or doesn't match, a +// query. +// It computes a score explanation for a query and a specific document. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-explain.html func New(tp elastictransport.Interface) *Explain { @@ -101,8 +103,6 @@ func New(tp elastictransport.Interface) *Explain { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -320,7 +320,7 @@ func (r *Explain) Header(key, value string) *Explain { return r } -// Id Defines the document ID. +// Id The document identifier. 
// API Name: id func (r *Explain) _id(id string) *Explain { r.paramSet |= idMask @@ -329,7 +329,7 @@ func (r *Explain) _id(id string) *Explain { return r } -// Index Index names used to limit the request. +// Index Index names that are used to limit the request. // Only a single index name can be provided to this parameter. // API Name: index func (r *Explain) _index(index string) *Explain { @@ -339,8 +339,8 @@ func (r *Explain) _index(index string) *Explain { return r } -// Analyzer Analyzer to use for the query string. -// This parameter can only be used when the `q` query string parameter is +// Analyzer The analyzer to use for the query string. +// This parameter can be used only when the `q` query string parameter is // specified. // API name: analyzer func (r *Explain) Analyzer(analyzer string) *Explain { @@ -350,6 +350,8 @@ func (r *Explain) Analyzer(analyzer string) *Explain { } // AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: analyze_wildcard func (r *Explain) AnalyzeWildcard(analyzewildcard bool) *Explain { r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) @@ -358,6 +360,8 @@ func (r *Explain) AnalyzeWildcard(analyzewildcard bool) *Explain { } // DefaultOperator The default operator for query string query: `AND` or `OR`. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: default_operator func (r *Explain) DefaultOperator(defaultoperator operator.Operator) *Explain { r.values.Set("default_operator", defaultoperator.String()) @@ -365,7 +369,10 @@ func (r *Explain) DefaultOperator(defaultoperator operator.Operator) *Explain { return r } -// Df Field to use as default where no field prefix is given in the query string. +// Df The field to use as default where no field prefix is given in the query +// string. 
+// This parameter can be used only when the `q` query string parameter is +// specified. // API name: df func (r *Explain) Df(df string) *Explain { r.values.Set("df", df) @@ -375,6 +382,8 @@ func (r *Explain) Df(df string) *Explain { // Lenient If `true`, format-based query failures (such as providing text to a numeric // field) in the query string will be ignored. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: lenient func (r *Explain) Lenient(lenient bool) *Explain { r.values.Set("lenient", strconv.FormatBool(lenient)) @@ -382,8 +391,8 @@ func (r *Explain) Lenient(lenient bool) *Explain { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// It is random by default. // API name: preference func (r *Explain) Preference(preference string) *Explain { r.values.Set("preference", preference) @@ -391,7 +400,7 @@ func (r *Explain) Preference(preference string) *Explain { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *Explain) Routing(routing string) *Explain { r.values.Set("routing", routing) @@ -399,7 +408,7 @@ func (r *Explain) Routing(routing string) *Explain { return r } -// Source_ True or false to return the `_source` field or not, or a list of fields to +// Source_ `True` or `false` to return the `_source` field or not or a list of fields to // return. // API name: _source func (r *Explain) Source_(sourceconfigparam string) *Explain { @@ -409,6 +418,9 @@ func (r *Explain) Source_(sourceconfigparam string) *Explain { } // SourceExcludes_ A comma-separated list of source fields to exclude from the response. +// You can also use this parameter to exclude fields from the subset specified +// in `_source_includes` query parameter. 
+// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_excludes func (r *Explain) SourceExcludes_(fields ...string) *Explain { r.values.Set("_source_excludes", strings.Join(fields, ",")) @@ -417,6 +429,10 @@ func (r *Explain) SourceExcludes_(fields ...string) *Explain { } // SourceIncludes_ A comma-separated list of source fields to include in the response. +// If this parameter is specified, only these source fields are returned. +// You can exclude fields from this subset using the `_source_excludes` query +// parameter. +// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_includes func (r *Explain) SourceIncludes_(fields ...string) *Explain { r.values.Set("_source_includes", strings.Join(fields, ",")) @@ -432,7 +448,7 @@ func (r *Explain) StoredFields(fields ...string) *Explain { return r } -// Q Query in the Lucene query string syntax. +// Q The query in the Lucene query string syntax. // API name: q func (r *Explain) Q(q string) *Explain { r.values.Set("q", q) @@ -484,11 +500,15 @@ func (r *Explain) Pretty(pretty bool) *Explain { return r } -// Query Defines the search definition using the Query DSL. +// Defines the search definition using the Query DSL. // API name: query -func (r *Explain) Query(query *types.Query) *Explain { +func (r *Explain) Query(query types.QueryVariant) *Explain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } diff --git a/typedapi/core/explain/request.go b/typedapi/core/explain/request.go index c7357a9dc4..a0de0a7ed1 100644 --- a/typedapi/core/explain/request.go +++ b/typedapi/core/explain/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package explain @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package explain // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/explain/ExplainRequest.ts#L26-L106 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/explain/ExplainRequest.ts#L26-L125 type Request struct { // Query Defines the search definition using the Query DSL. diff --git a/typedapi/core/explain/response.go b/typedapi/core/explain/response.go index eece18633d..3774223f05 100644 --- a/typedapi/core/explain/response.go +++ b/typedapi/core/explain/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package explain @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package explain // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/explain/ExplainResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/explain/ExplainResponse.ts#L23-L31 type Response struct { Explanation *types.ExplanationDetail `json:"explanation,omitempty"` Get *types.InlineGet `json:"get,omitempty"` diff --git a/typedapi/core/fieldcaps/field_caps.go b/typedapi/core/fieldcaps/field_caps.go index 4943a78373..a632c01eb7 100644 --- a/typedapi/core/fieldcaps/field_caps.go +++ b/typedapi/core/fieldcaps/field_caps.go @@ -16,14 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// The field capabilities API returns the information about the capabilities of -// fields among multiple indices. -// The field capabilities API returns runtime fields like any other field. For -// example, a runtime field with a type -// of keyword is returned as any other field that belongs to the `keyword` -// family. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Get the field capabilities. +// +// Get information about the capabilities of fields among multiple indices. +// +// For data streams, the API returns field capabilities among the stream’s +// backing indices. +// It returns runtime fields like any other field. 
+// For example, a runtime field with a type of keyword is returned the same as +// any other field that belongs to the `keyword` family. package fieldcaps import ( @@ -85,12 +88,15 @@ func NewFieldCapsFunc(tp elastictransport.Interface) NewFieldCaps { } } -// The field capabilities API returns the information about the capabilities of -// fields among multiple indices. -// The field capabilities API returns runtime fields like any other field. For -// example, a runtime field with a type -// of keyword is returned as any other field that belongs to the `keyword` -// family. +// Get the field capabilities. +// +// Get information about the capabilities of fields among multiple indices. +// +// For data streams, the API returns field capabilities among the stream’s +// backing indices. +// It returns runtime fields like any other field. +// For example, a runtime field with a type of keyword is returned the same as +// any other field that belongs to the `keyword` family. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html func New(tp elastictransport.Interface) *FieldCaps { @@ -100,8 +106,6 @@ func New(tp elastictransport.Interface) *FieldCaps { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -318,9 +322,9 @@ func (r *FieldCaps) Header(key, value string) *FieldCaps { return r } -// Index Comma-separated list of data streams, indices, and aliases used to limit the -// request. Supports wildcards (*). To target all data streams and indices, omit -// this parameter or use * or _all. +// Index A comma-separated list of data streams, indices, and aliases used to limit +// the request. Supports wildcards (*). To target all data streams and indices, +// omit this parameter or use * or _all. 
// API Name: index func (r *FieldCaps) Index(index string) *FieldCaps { r.paramSet |= indexMask @@ -342,7 +346,7 @@ func (r *FieldCaps) AllowNoIndices(allownoindices bool) *FieldCaps { return r } -// ExpandWildcards Type of index that wildcard patterns can match. If the request can target +// ExpandWildcards The type of index that wildcard patterns can match. If the request can target // data streams, this argument determines whether wildcard expressions match // hidden data streams. Supports comma-separated values, such as `open,hidden`. // API name: expand_wildcards @@ -372,8 +376,7 @@ func (r *FieldCaps) IncludeUnmapped(includeunmapped bool) *FieldCaps { return r } -// Filters An optional set of filters: can include -// +metadata,-metadata,-nested,-multifield,-parent +// Filters A comma-separated list of filters to apply to the response. // API name: filters func (r *FieldCaps) Filters(filters string) *FieldCaps { r.values.Set("filters", filters) @@ -381,7 +384,10 @@ func (r *FieldCaps) Filters(filters string) *FieldCaps { return r } -// Types Only return results for fields that have one of the types in the list +// Types A comma-separated list of field types to include. +// Any fields that do not match one of these types will be excluded from the +// results. +// It defaults to empty, meaning that all field types are returned. // API name: types func (r *FieldCaps) Types(types ...string) *FieldCaps { tmp := []string{} @@ -445,32 +451,54 @@ func (r *FieldCaps) Pretty(pretty bool) *FieldCaps { return r } -// Fields List of fields to retrieve capabilities for. Wildcard (`*`) expressions are +// A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are // supported. 
// API name: fields func (r *FieldCaps) Fields(fields ...string) *FieldCaps { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Fields = fields return r } -// IndexFilter Allows to filter indices if the provided query rewrites to match_none on -// every shard. +// Filter indices if the provided query rewrites to `match_none` on every shard. +// +// IMPORTANT: The filtering is done on a best-effort basis, it uses index +// statistics and mappings to rewrite queries to `match_none` instead of fully +// running the request. +// For instance a range query over a date field can rewrite to `match_none` if +// all documents within a shard (including deleted documents) are outside of the +// provided range. +// However, not all queries can rewrite to `match_none` so this API may return +// an index even if the provided filter matches no document. // API name: index_filter -func (r *FieldCaps) IndexFilter(indexfilter *types.Query) *FieldCaps { +func (r *FieldCaps) IndexFilter(indexfilter types.QueryVariant) *FieldCaps { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndexFilter = indexfilter + r.req.IndexFilter = indexfilter.QueryCaster() return r } -// RuntimeMappings Defines ad-hoc runtime fields in the request similar to the way it is done in +// Define ad-hoc runtime fields in the request similar to the way it is done in // search requests. // These fields exist only as part of the query and take precedence over fields // defined with the same name in the index mappings. 
// API name: runtime_mappings -func (r *FieldCaps) RuntimeMappings(runtimefields types.RuntimeFields) *FieldCaps { - r.req.RuntimeMappings = runtimefields +func (r *FieldCaps) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *FieldCaps { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } diff --git a/typedapi/core/fieldcaps/request.go b/typedapi/core/fieldcaps/request.go index de6e740b16..fe04fd31ab 100644 --- a/typedapi/core/fieldcaps/request.go +++ b/typedapi/core/fieldcaps/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package fieldcaps @@ -32,16 +32,24 @@ import ( // Request holds the request body struct for the package fieldcaps // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/field_caps/FieldCapabilitiesRequest.ts#L25-L106 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/field_caps/FieldCapabilitiesRequest.ts#L25-L130 type Request struct { - // Fields List of fields to retrieve capabilities for. Wildcard (`*`) expressions are + // Fields A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are // supported. Fields []string `json:"fields,omitempty"` - // IndexFilter Allows to filter indices if the provided query rewrites to match_none on - // every shard. + // IndexFilter Filter indices if the provided query rewrites to `match_none` on every shard. 
+ // + // IMPORTANT: The filtering is done on a best-effort basis, it uses index + // statistics and mappings to rewrite queries to `match_none` instead of fully + // running the request. + // For instance a range query over a date field can rewrite to `match_none` if + // all documents within a shard (including deleted documents) are outside of the + // provided range. + // However, not all queries can rewrite to `match_none` so this API may return + // an index even if the provided filter matches no document. IndexFilter *types.Query `json:"index_filter,omitempty"` - // RuntimeMappings Defines ad-hoc runtime fields in the request similar to the way it is done in + // RuntimeMappings Define ad-hoc runtime fields in the request similar to the way it is done in // search requests. // These fields exist only as part of the query and take precedence over fields // defined with the same name in the index mappings. diff --git a/typedapi/core/fieldcaps/response.go b/typedapi/core/fieldcaps/response.go index dcb6e2d423..f9710b9d81 100644 --- a/typedapi/core/fieldcaps/response.go +++ b/typedapi/core/fieldcaps/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package fieldcaps @@ -32,10 +32,12 @@ import ( // Response holds the response body struct for the package fieldcaps // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/field_caps/FieldCapabilitiesResponse.ts#L24-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/field_caps/FieldCapabilitiesResponse.ts#L24-L38 type Response struct { - Fields map[string]map[string]types.FieldCapability `json:"fields"` - Indices []string `json:"indices"` + Fields map[string]map[string]types.FieldCapability `json:"fields"` + // Indices The list of indices where this field has the same type family, or null if all + // indices have the same type family for the field. + Indices []string `json:"indices"` } // NewResponse returns a Response diff --git a/typedapi/core/get/get.go b/typedapi/core/get/get.go index 5df662380d..cedb1941c8 100644 --- a/typedapi/core/get/get.go +++ b/typedapi/core/get/get.go @@ -16,10 +16,82 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get a document by its ID. -// Retrieves the document with the specified ID from an index. +// +// Get a document and its source or stored fields from an index. +// +// By default, this API is realtime and is not affected by the refresh rate of +// the index (when data will become visible for search). 
+// In the case where stored fields are requested with the `stored_fields` +// parameter and the document has been updated but is not yet refreshed, the API +// will have to parse and analyze the source to extract the stored fields. +// To turn off realtime behavior, set the `realtime` parameter to false. +// +// **Source filtering** +// +// By default, the API returns the contents of the `_source` field unless you +// have used the `stored_fields` parameter or the `_source` field is turned off. +// You can turn off `_source` retrieval by using the `_source` parameter: +// +// ``` +// GET my-index-000001/_doc/0?_source=false +// ``` +// +// If you only need one or two fields from the `_source`, use the +// `_source_includes` or `_source_excludes` parameters to include or filter out +// particular fields. +// This can be helpful with large documents where partial retrieval can save on +// network overhead +// Both parameters take a comma separated list of fields or wildcard +// expressions. +// For example: +// +// ``` +// GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities +// ``` +// +// If you only want to specify includes, you can use a shorter notation: +// +// ``` +// GET my-index-000001/_doc/0?_source=*.id +// ``` +// +// **Routing** +// +// If routing is used during indexing, the routing value also needs to be +// specified to retrieve a document. +// For example: +// +// ``` +// GET my-index-000001/_doc/2?routing=user1 +// ``` +// +// This request gets the document with ID 2, but it is routed based on the user. +// The document is not fetched if the correct routing is not specified. +// +// **Distributed** +// +// The GET operation is hashed into a specific shard ID. +// It is then redirected to one of the replicas within that shard ID and returns +// the result. +// The replicas are the primary shard and its replicas within that shard ID +// group. 
+// This means that the more replicas you have, the better your GET scaling will +// be. +// +// **Versioning support** +// +// You can use the `version` parameter to retrieve the document only if its +// current version is equal to the specified one. +// +// Internally, Elasticsearch has marked the old document as deleted and added an +// entirely new document. +// The old version of the document doesn't disappear immediately, although you +// won't be able to access it. +// Elasticsearch cleans up deleted documents in the background as you continue +// to index more data. package get import ( @@ -85,7 +157,79 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } // Get a document by its ID. -// Retrieves the document with the specified ID from an index. +// +// Get a document and its source or stored fields from an index. +// +// By default, this API is realtime and is not affected by the refresh rate of +// the index (when data will become visible for search). +// In the case where stored fields are requested with the `stored_fields` +// parameter and the document has been updated but is not yet refreshed, the API +// will have to parse and analyze the source to extract the stored fields. +// To turn off realtime behavior, set the `realtime` parameter to false. +// +// **Source filtering** +// +// By default, the API returns the contents of the `_source` field unless you +// have used the `stored_fields` parameter or the `_source` field is turned off. +// You can turn off `_source` retrieval by using the `_source` parameter: +// +// ``` +// GET my-index-000001/_doc/0?_source=false +// ``` +// +// If you only need one or two fields from the `_source`, use the +// `_source_includes` or `_source_excludes` parameters to include or filter out +// particular fields. +// This can be helpful with large documents where partial retrieval can save on +// network overhead +// Both parameters take a comma separated list of fields or wildcard +// expressions. 
+// For example: +// +// ``` +// GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities +// ``` +// +// If you only want to specify includes, you can use a shorter notation: +// +// ``` +// GET my-index-000001/_doc/0?_source=*.id +// ``` +// +// **Routing** +// +// If routing is used during indexing, the routing value also needs to be +// specified to retrieve a document. +// For example: +// +// ``` +// GET my-index-000001/_doc/2?routing=user1 +// ``` +// +// This request gets the document with ID 2, but it is routed based on the user. +// The document is not fetched if the correct routing is not specified. +// +// **Distributed** +// +// The GET operation is hashed into a specific shard ID. +// It is then redirected to one of the replicas within that shard ID and returns +// the result. +// The replicas are the primary shard and its replicas within that shard ID +// group. +// This means that the more replicas you have, the better your GET scaling will +// be. +// +// **Versioning support** +// +// You can use the `version` parameter to retrieve the document only if its +// current version is equal to the specified one. +// +// Internally, Elasticsearch has marked the old document as deleted and added an +// entirely new document. +// The old version of the document doesn't disappear immediately, although you +// won't be able to access it. +// Elasticsearch cleans up deleted documents in the background as you continue +// to index more data. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html func New(tp elastictransport.Interface) *Get { @@ -339,7 +483,7 @@ func (r *Get) Header(key, value string) *Get { return r } -// Id Unique identifier of the document. +// Id A unique document identifier. // API Name: id func (r *Get) _id(id string) *Get { r.paramSet |= idMask @@ -348,7 +492,7 @@ func (r *Get) _id(id string) *Get { return r } -// Index Name of the index that contains the document. 
+// Index The name of the index that contains the document.
// API Name: index
func (r *Get) _index(index string) *Get {
	r.paramSet |= indexMask
@@ -357,11 +501,11 @@ func (r *Get) _index(index string) *Get {
	return r
}

-// ForceSyntheticSource Should this request force synthetic _source?
-// Use this to test if the mapping supports synthetic _source and to get a sense
-// of the worst case performance.
-// Fetches with this enabled will be slower the enabling synthetic source
-// natively in the index.
+// ForceSyntheticSource Indicates whether the request forces synthetic `_source`.
+// Use this parameter to test if the mapping supports synthetic `_source` and to
+// get a sense of the worst case performance.
+// Fetches with this parameter enabled will be slower than enabling synthetic
+// source natively in the index.
// API name: force_synthetic_source
func (r *Get) ForceSyntheticSource(forcesyntheticsource bool) *Get {
	r.values.Set("force_synthetic_source", strconv.FormatBool(forcesyntheticsource))
@@ -369,8 +513,16 @@ func (r *Get) ForceSyntheticSource(forcesyntheticsource bool) *Get {
	return r
}

-// Preference Specifies the node or shard the operation should be performed on. Random by
-// default.
+// Preference The node or shard the operation should be performed on.
+// By default, the operation is randomized between the shard replicas.
+//
+// If it is set to `_local`, the operation will prefer to be run on a local
+// allocated shard when possible.
+// If it is set to a custom value, the value is used to guarantee that the same
+// shards will be used for the same custom value.
+// This can help with "jumping values" when hitting different shards in
+// different refresh states.
+// A sample value can be something like the web session ID or the user name.
// API name: preference func (r *Get) Preference(preference string) *Get { r.values.Set("preference", preference) @@ -386,8 +538,10 @@ func (r *Get) Realtime(realtime bool) *Get { return r } -// Refresh If true, Elasticsearch refreshes the affected shards to make this operation -// visible to search. If false, do nothing with refreshes. +// Refresh If `true`, the request refreshes the relevant shards before retrieving the +// document. +// Setting it to `true` should be done after careful thought and verification +// that this does not cause a heavy load on the system (and slow down indexing). // API name: refresh func (r *Get) Refresh(refresh bool) *Get { r.values.Set("refresh", strconv.FormatBool(refresh)) @@ -395,7 +549,7 @@ func (r *Get) Refresh(refresh bool) *Get { return r } -// Routing Target the specified primary shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *Get) Routing(routing string) *Get { r.values.Set("routing", routing) @@ -403,8 +557,8 @@ func (r *Get) Routing(routing string) *Get { return r } -// Source_ True or false to return the _source field or not, or a list of fields to -// return. +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or lists +// the fields to return. // API name: _source func (r *Get) Source_(sourceconfigparam string) *Get { r.values.Set("_source", sourceconfigparam) @@ -412,7 +566,10 @@ func (r *Get) Source_(sourceconfigparam string) *Get { return r } -// SourceExcludes_ A comma-separated list of source fields to exclude in the response. +// SourceExcludes_ A comma-separated list of source fields to exclude from the response. +// You can also use this parameter to exclude fields from the subset specified +// in `_source_includes` query parameter. +// If the `_source` parameter is `false`, this parameter is ignored. 
// API name: _source_excludes
func (r *Get) SourceExcludes_(fields ...string) *Get {
	r.values.Set("_source_excludes", strings.Join(fields, ","))
@@ -421,6 +578,10 @@ func (r *Get) SourceExcludes_(fields ...string) *Get {
}

// SourceIncludes_ A comma-separated list of source fields to include in the response.
+// If this parameter is specified, only these source fields are returned.
+// You can exclude fields from this subset using the `_source_excludes` query
+// parameter.
+// If the `_source` parameter is `false`, this parameter is ignored.
// API name: _source_includes
func (r *Get) SourceIncludes_(fields ...string) *Get {
	r.values.Set("_source_includes", strings.Join(fields, ","))
@@ -428,9 +589,11 @@ func (r *Get) SourceIncludes_(fields ...string) *Get {
	return r
}

-// StoredFields List of stored fields to return as part of a hit.
+// StoredFields A comma-separated list of stored fields to return as part of a hit.
// If no fields are specified, no stored fields are included in the response.
-// If this field is specified, the `_source` parameter defaults to false.
+// If this field is specified, the `_source` parameter defaults to `false`.
+// Only leaf fields can be retrieved with the `stored_fields` option.
+// Object fields can't be returned; if specified, the request fails.
// API name: stored_fields
func (r *Get) StoredFields(fields ...string) *Get {
	r.values.Set("stored_fields", strings.Join(fields, ","))
@@ -438,8 +601,8 @@ func (r *Get) StoredFields(fields ...string) *Get {
	return r
}

-// Version Explicit version number for concurrency control. The specified version must
-// match the current version of the document for the request to succeed.
+// Version The version number for concurrency control.
+// It must match the current version of the document for the request to succeed.
// API name: version func (r *Get) Version(versionnumber string) *Get { r.values.Set("version", versionnumber) @@ -447,7 +610,7 @@ func (r *Get) Version(versionnumber string) *Get { return r } -// VersionType Specific version type: internal, external, external_gte. +// VersionType The version type. // API name: version_type func (r *Get) VersionType(versiontype versiontype.VersionType) *Get { r.values.Set("version_type", versiontype.String()) diff --git a/typedapi/core/get/response.go b/typedapi/core/get/response.go index 9d82deff35..4ef2a02189 100644 --- a/typedapi/core/get/response.go +++ b/typedapi/core/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package get @@ -26,17 +26,33 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/get/GetResponse.ts#L23-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/get/GetResponse.ts#L23-L34 type Response struct { - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Found bool `json:"found"` - Id_ string `json:"_id"` - Index_ string `json:"_index"` - PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - Routing_ *string `json:"_routing,omitempty"` - SeqNo_ *int64 `json:"_seq_no,omitempty"` - Source_ json.RawMessage `json:"_source,omitempty"` - Version_ *int64 `json:"_version,omitempty"` + + // Fields If the `stored_fields` parameter is set to `true` and `found` is `true`, it + // contains the document fields stored in the index. 
+	Fields map[string]json.RawMessage `json:"fields,omitempty"`
+	// Found Indicates whether the document exists.
+	Found bool `json:"found"`
+	// Id_ The unique identifier for the document.
+	Id_ string `json:"_id"`
+	Ignored_ []string `json:"_ignored,omitempty"`
+	// Index_ The name of the index the document belongs to.
+	Index_ string `json:"_index"`
+	// PrimaryTerm_ The primary term assigned to the document for the indexing operation.
+	PrimaryTerm_ *int64 `json:"_primary_term,omitempty"`
+	// Routing_ The explicit routing, if set.
+	Routing_ *string `json:"_routing,omitempty"`
+	// SeqNo_ The sequence number assigned to the document for the indexing operation.
+	// Sequence numbers are used to ensure an older version of a document doesn't
+	// overwrite a newer version.
+	SeqNo_ *int64 `json:"_seq_no,omitempty"`
+	// Source_ If `found` is `true`, it contains the document data formatted in JSON.
+	// If the `_source` parameter is set to `false` or the `stored_fields` parameter
+	// is set to `true`, it is excluded.
+	Source_ json.RawMessage `json:"_source,omitempty"`
+	// Version_ The document version, which is incremented each time the document is updated.
+	Version_ *int64 `json:"_version,omitempty"`
}

// NewResponse returns a Response
diff --git a/typedapi/core/getscript/get_script.go b/typedapi/core/getscript/get_script.go
index 185ea71173..2a7e5e8fc0 100644
--- a/typedapi/core/getscript/get_script.go
+++ b/typedapi/core/getscript/get_script.go
@@ -16,7 +16,7 @@
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d

// Get a script or search template.
// Retrieves a stored script or search template.
@@ -80,7 +80,7 @@ func NewGetScriptFunc(tp elastictransport.Interface) NewGetScript {

// Get a script or search template.
// Retrieves a stored script or search template. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-stored-script-api.html func New(tp elastictransport.Interface) *GetScript { r := &GetScript{ transport: tp, @@ -290,7 +290,7 @@ func (r *GetScript) Header(key, value string) *GetScript { return r } -// Id Identifier for the stored script or search template. +// Id The identifier for the stored script or search template. // API Name: id func (r *GetScript) _id(id string) *GetScript { r.paramSet |= idMask @@ -299,7 +299,10 @@ func (r *GetScript) _id(id string) *GetScript { return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. // API name: master_timeout func (r *GetScript) MasterTimeout(duration string) *GetScript { r.values.Set("master_timeout", duration) diff --git a/typedapi/core/getscript/response.go b/typedapi/core/getscript/response.go index 45f82f8028..e16b4349f9 100644 --- a/typedapi/core/getscript/response.go +++ b/typedapi/core/getscript/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getscript @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getscript // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/get_script/GetScriptResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/get_script/GetScriptResponse.ts#L23-L29 type Response struct { Found bool `json:"found"` Id_ string `json:"_id"` diff --git a/typedapi/core/getscriptcontext/get_script_context.go b/typedapi/core/getscriptcontext/get_script_context.go index d718b58f5d..e920f1e8f2 100644 --- a/typedapi/core/getscriptcontext/get_script_context.go +++ b/typedapi/core/getscriptcontext/get_script_context.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns all script contexts. +// Get script contexts. +// +// Get a list of supported script contexts and their methods. package getscriptcontext import ( @@ -68,9 +70,11 @@ func NewGetScriptContextFunc(tp elastictransport.Interface) NewGetScriptContext } } -// Returns all script contexts. +// Get script contexts. +// +// Get a list of supported script contexts and their methods. 
// -// https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-contexts.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-script-contexts-api.html func New(tp elastictransport.Interface) *GetScriptContext { r := &GetScriptContext{ transport: tp, diff --git a/typedapi/core/getscriptcontext/response.go b/typedapi/core/getscriptcontext/response.go index 72dae7af66..a161c11c39 100644 --- a/typedapi/core/getscriptcontext/response.go +++ b/typedapi/core/getscriptcontext/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getscriptcontext @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getscriptcontext // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/get_script_context/GetScriptContextResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/get_script_context/GetScriptContextResponse.ts#L22-L26 type Response struct { Contexts []types.GetScriptContext `json:"contexts"` } diff --git a/typedapi/core/getscriptlanguages/get_script_languages.go b/typedapi/core/getscriptlanguages/get_script_languages.go index de99d7220f..081bcbcf03 100644 --- a/typedapi/core/getscriptlanguages/get_script_languages.go +++ b/typedapi/core/getscriptlanguages/get_script_languages.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns available script types, languages and contexts +// Get script languages. +// +// Get a list of available script types, languages, and contexts. package getscriptlanguages import ( @@ -68,9 +70,11 @@ func NewGetScriptLanguagesFunc(tp elastictransport.Interface) NewGetScriptLangua } } -// Returns available script types, languages and contexts +// Get script languages. +// +// Get a list of available script types, languages, and contexts. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-script-languages-api.html func New(tp elastictransport.Interface) *GetScriptLanguages { r := &GetScriptLanguages{ transport: tp, diff --git a/typedapi/core/getscriptlanguages/response.go b/typedapi/core/getscriptlanguages/response.go index 67668ac7c2..954d75580b 100644 --- a/typedapi/core/getscriptlanguages/response.go +++ b/typedapi/core/getscriptlanguages/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getscriptlanguages @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getscriptlanguages // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/get_script_languages/GetScriptLanguagesResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/get_script_languages/GetScriptLanguagesResponse.ts#L22-L27 type Response struct { LanguageContexts []types.LanguageContext `json:"language_contexts"` TypesAllowed []string `json:"types_allowed"` diff --git a/typedapi/core/getsource/get_source.go b/typedapi/core/getsource/get_source.go index 0825e0085c..f8b58b96da 100644 --- a/typedapi/core/getsource/get_source.go +++ b/typedapi/core/getsource/get_source.go @@ -16,10 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get a document's source. -// Returns the source of a document. +// +// Get the source of a document. +// For example: +// +// ``` +// GET my-index-000001/_source/1 +// ``` +// +// You can use the source filtering parameters to control which parts of the +// `_source` are returned: +// +// ``` +// GET +// my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities +// ``` package getsource import ( @@ -84,7 +98,21 @@ func NewGetSourceFunc(tp elastictransport.Interface) NewGetSource { } // Get a document's source. -// Returns the source of a document. +// +// Get the source of a document. 
+// For example: +// +// ``` +// GET my-index-000001/_source/1 +// ``` +// +// You can use the source filtering parameters to control which parts of the +// `_source` are returned: +// +// ``` +// GET +// my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities +// ``` // // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html func New(tp elastictransport.Interface) *GetSource { @@ -302,7 +330,7 @@ func (r *GetSource) Header(key, value string) *GetSource { return r } -// Id Unique identifier of the document. +// Id A unique document identifier. // API Name: id func (r *GetSource) _id(id string) *GetSource { r.paramSet |= idMask @@ -311,7 +339,7 @@ func (r *GetSource) _id(id string) *GetSource { return r } -// Index Name of the index that contains the document. +// Index The name of the index that contains the document. // API Name: index func (r *GetSource) _index(index string) *GetSource { r.paramSet |= indexMask @@ -320,8 +348,8 @@ func (r *GetSource) _index(index string) *GetSource { return r } -// Preference Specifies the node or shard the operation should be performed on. Random by -// default. +// Preference The node or shard the operation should be performed on. +// By default, the operation is randomized between the shard replicas. // API name: preference func (r *GetSource) Preference(preference string) *GetSource { r.values.Set("preference", preference) @@ -329,7 +357,7 @@ func (r *GetSource) Preference(preference string) *GetSource { return r } -// Realtime Boolean) If true, the request is real-time as opposed to near-real-time. +// Realtime If `true`, the request is real-time as opposed to near-real-time. 
// API name: realtime func (r *GetSource) Realtime(realtime bool) *GetSource { r.values.Set("realtime", strconv.FormatBool(realtime)) @@ -337,8 +365,10 @@ func (r *GetSource) Realtime(realtime bool) *GetSource { return r } -// Refresh If true, Elasticsearch refreshes the affected shards to make this operation -// visible to search. If false, do nothing with refreshes. +// Refresh If `true`, the request refreshes the relevant shards before retrieving the +// document. +// Setting it to `true` should be done after careful thought and verification +// that this does not cause a heavy load on the system (and slow down indexing). // API name: refresh func (r *GetSource) Refresh(refresh bool) *GetSource { r.values.Set("refresh", strconv.FormatBool(refresh)) @@ -346,7 +376,7 @@ func (r *GetSource) Refresh(refresh bool) *GetSource { return r } -// Routing Target the specified primary shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *GetSource) Routing(routing string) *GetSource { r.values.Set("routing", routing) @@ -354,8 +384,8 @@ func (r *GetSource) Routing(routing string) *GetSource { return r } -// Source_ True or false to return the _source field or not, or a list of fields to -// return. +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or lists +// the fields to return. // API name: _source func (r *GetSource) Source_(sourceconfigparam string) *GetSource { r.values.Set("_source", sourceconfigparam) @@ -379,6 +409,7 @@ func (r *GetSource) SourceIncludes_(fields ...string) *GetSource { return r } +// StoredFields A comma-separated list of stored fields to return as part of a hit. // API name: stored_fields func (r *GetSource) StoredFields(fields ...string) *GetSource { r.values.Set("stored_fields", strings.Join(fields, ",")) @@ -386,8 +417,8 @@ func (r *GetSource) StoredFields(fields ...string) *GetSource { return r } -// Version Explicit version number for concurrency control. 
The specified version must -// match the current version of the document for the request to succeed. +// Version The version number for concurrency control. +// It must match the current version of the document for the request to succeed. // API name: version func (r *GetSource) Version(versionnumber string) *GetSource { r.values.Set("version", versionnumber) @@ -395,7 +426,7 @@ func (r *GetSource) Version(versionnumber string) *GetSource { return r } -// VersionType Specific version type: internal, external, external_gte. +// VersionType The version type. // API name: version_type func (r *GetSource) VersionType(versiontype versiontype.VersionType) *GetSource { r.values.Set("version_type", versiontype.String()) diff --git a/typedapi/core/getsource/response.go b/typedapi/core/getsource/response.go index f906ded05b..d00551c102 100644 --- a/typedapi/core/getsource/response.go +++ b/typedapi/core/getsource/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getsource @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getsource // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/get_source/SourceResponse.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/get_source/SourceResponse.ts#L20-L23 type Response = json.RawMessage diff --git a/typedapi/core/healthreport/health_report.go b/typedapi/core/healthreport/health_report.go index f3fdd35e9d..7424a34b26 100644 --- a/typedapi/core/healthreport/health_report.go +++ b/typedapi/core/healthreport/health_report.go @@ -16,9 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the health of the cluster. +// Get the cluster health. +// Get a report with the health status of an Elasticsearch cluster. +// The report contains a list of indicators that compose Elasticsearch +// functionality. +// +// Each indicator has a health status of: green, unknown, yellow or red. +// The indicator will provide an explanation and metadata describing the reason +// for its current health status. +// +// The cluster’s status is controlled by the worst indicator status. +// +// In the event that an indicator’s status is non-green, a list of impacts may +// be present in the indicator result which detail the functionalities that are +// negatively affected by the health issue. 
+// Each impact carries with it a severity level, an area of the system that is +// affected, and a simple description of the impact on the system. +// +// Some health indicators can determine the root cause of a health problem and +// prescribe a set of steps that can be performed in order to improve the health +// of the system. +// The root cause and remediation steps are encapsulated in a diagnosis. +// A diagnosis contains a cause detailing a root cause analysis, an action +// containing a brief description of the steps to take to fix the problem, the +// list of affected resources (if applicable), and a detailed step-by-step +// troubleshooting guide to fix the diagnosed problem. +// +// NOTE: The health indicators perform root cause analysis of non-green health +// statuses. This can be computationally expensive when called frequently. +// When setting up automated polling of the API for health status, set verbose +// to false to disable the more expensive analysis logic. package healthreport import ( @@ -74,7 +103,36 @@ func NewHealthReportFunc(tp elastictransport.Interface) NewHealthReport { } } -// Returns the health of the cluster. +// Get the cluster health. +// Get a report with the health status of an Elasticsearch cluster. +// The report contains a list of indicators that compose Elasticsearch +// functionality. +// +// Each indicator has a health status of: green, unknown, yellow or red. +// The indicator will provide an explanation and metadata describing the reason +// for its current health status. +// +// The cluster’s status is controlled by the worst indicator status. +// +// In the event that an indicator’s status is non-green, a list of impacts may +// be present in the indicator result which detail the functionalities that are +// negatively affected by the health issue. +// Each impact carries with it a severity level, an area of the system that is +// affected, and a simple description of the impact on the system. 
+// +// Some health indicators can determine the root cause of a health problem and +// prescribe a set of steps that can be performed in order to improve the health +// of the system. +// The root cause and remediation steps are encapsulated in a diagnosis. +// A diagnosis contains a cause detailing a root cause analysis, an action +// containing a brief description of the steps to take to fix the problem, the +// list of affected resources (if applicable), and a detailed step-by-step +// troubleshooting guide to fix the diagnosed problem. +// +// NOTE: The health indicators perform root cause analysis of non-green health +// statuses. This can be computationally expensive when called frequently. +// When setting up automated polling of the API for health status, set verbose +// to false to disable the more expensive analysis logic. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/health-api.html func New(tp elastictransport.Interface) *HealthReport { diff --git a/typedapi/core/healthreport/response.go b/typedapi/core/healthreport/response.go index afc63726ee..9c0d808dd7 100644 --- a/typedapi/core/healthreport/response.go +++ b/typedapi/core/healthreport/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package healthreport @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package healthreport // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/Response.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/Response.ts#L22-L28 type Response struct { ClusterName string `json:"cluster_name"` Indicators types.Indicators `json:"indicators"` diff --git a/typedapi/core/index/index.go b/typedapi/core/index/index.go index 0dd0d41930..dec48aeb9b 100644 --- a/typedapi/core/index/index.go +++ b/typedapi/core/index/index.go @@ -16,13 +16,207 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Index a document. -// Adds a JSON document to the specified data stream or index and makes it +// Create or update a document in an index. +// +// Add a JSON document to the specified data stream or index and make it // searchable. // If the target is an index and the document already exists, the request // updates the document and increments its version. +// +// NOTE: You cannot use this API to send update requests for existing documents +// in a data stream. 
+//
+// If the Elasticsearch security features are enabled, you must have the
+// following index privileges for the target data stream, index, or index alias:
+//
+// * To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request
+// format, you must have the `create`, `index`, or `write` index privilege.
+// * To add a document using the `POST /<target>/_doc/` request format, you must
+// have the `create_doc`, `create`, `index`, or `write` index privilege.
+// * To automatically create a data stream or index with this API request, you
+// must have the `auto_configure`, `create_index`, or `manage` index privilege.
+//
+// Automatic data stream creation requires a matching index template with data
+// stream enabled.
+//
+// NOTE: Replica shards might not all be started when an indexing operation
+// returns successfully.
+// By default, only the primary is required. Set `wait_for_active_shards` to
+// change this default behavior.
+//
+// **Automatically create data streams and indices**
+//
+// If the request's target doesn't exist and matches an index template with a
+// `data_stream` definition, the index operation automatically creates the data
+// stream.
+//
+// If the target doesn't exist and doesn't match a data stream template, the
+// operation automatically creates the index and applies any matching index
+// templates.
+//
+// NOTE: Elasticsearch includes several built-in index templates. To avoid
+// naming collisions with these templates, refer to index pattern documentation.
+//
+// If no mapping exists, the index operation creates a dynamic mapping.
+// By default, new fields and objects are automatically added to the mapping if
+// needed.
+//
+// Automatic index creation is controlled by the `action.auto_create_index`
+// setting.
+// If it is `true`, any index can be created automatically.
+// You can modify this setting to explicitly allow or block automatic creation +// of indices that match specified patterns or set it to `false` to turn off +// automatic index creation entirely. +// Specify a comma-separated list of patterns you want to allow or prefix each +// pattern with `+` or `-` to indicate whether it should be allowed or blocked. +// When a list is specified, the default behaviour is to disallow. +// +// NOTE: The `action.auto_create_index` setting affects the automatic creation +// of indices only. +// It does not affect the creation of data streams. +// +// **Optimistic concurrency control** +// +// Index operations can be made conditional and only be performed if the last +// modification to the document was assigned the sequence number and primary +// term specified by the `if_seq_no` and `if_primary_term` parameters. +// If a mismatch is detected, the operation will result in a +// `VersionConflictException` and a status code of `409`. +// +// **Routing** +// +// By default, shard placement — or routing — is controlled by using a hash of +// the document's ID value. +// For more explicit control, the value fed into the hash function used by the +// router can be directly specified on a per-operation basis using the `routing` +// parameter. +// +// When setting up explicit mapping, you can also use the `_routing` field to +// direct the index operation to extract the routing value from the document +// itself. +// This does come at the (very minimal) cost of an additional document parsing +// pass. +// If the `_routing` mapping is defined and set to be required, the index +// operation will fail if no routing value is provided or extracted. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. 
+// +// **Distributed** +// +// The index operation is directed to the primary shard based on its route and +// performed on the actual node containing this shard. +// After the primary shard completes the operation, if needed, the update is +// distributed to applicable replicas. +// +// **Active shards** +// +// To improve the resiliency of writes to the system, indexing operations can be +// configured to wait for a certain number of active shard copies before +// proceeding with the operation. +// If the requisite number of active shard copies are not available, then the +// write operation must wait and retry, until either the requisite shard copies +// have started or a timeout occurs. +// By default, write operations only wait for the primary shards to be active +// before proceeding (that is to say `wait_for_active_shards` is `1`). +// This default can be overridden in the index settings dynamically by setting +// `index.write.wait_for_active_shards`. +// To alter this behavior per operation, use the `wait_for_active_shards +// request` parameter. +// +// Valid values are all or any positive integer up to the total number of +// configured copies per shard in the index (which is `number_of_replicas`+1). +// Specifying a negative value or a number greater than the number of shard +// copies will throw an error. +// +// For example, suppose you have a cluster of three nodes, A, B, and C and you +// create an index index with the number of replicas set to 3 (resulting in 4 +// shard copies, one more copy than there are nodes). +// If you attempt an indexing operation, by default the operation will only +// ensure the primary copy of each shard is available before proceeding. +// This means that even if B and C went down and A hosted the primary shard +// copies, the indexing operation would still proceed with only one copy of the +// data. 
+// If `wait_for_active_shards` is set on the request to `3` (and all three nodes +// are up), the indexing operation will require 3 active shard copies before +// proceeding. +// This requirement should be met because there are 3 active nodes in the +// cluster, each one holding a copy of the shard. +// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is +// the same in this situation), the indexing operation will not proceed as you +// do not have all 4 copies of each shard active in the index. +// The operation will timeout unless a new node is brought up in the cluster to +// host the fourth copy of the shard. +// +// It is important to note that this setting greatly reduces the chances of the +// write operation not writing to the requisite number of shard copies, but it +// does not completely eliminate the possibility, because this check occurs +// before the write operation starts. +// After the write operation is underway, it is still possible for replication +// to fail on any number of shard copies but still succeed on the primary. +// The `_shards` section of the API response reveals the number of shard copies +// on which replication succeeded and failed. +// +// **No operation (noop) updates** +// +// When updating a document by using this API, a new version of the document is +// always created even if the document hasn't changed. +// If this isn't acceptable use the `_update` API with `detect_noop` set to +// `true`. +// The `detect_noop` option isn't available on this API because it doesn’t fetch +// the old source and isn't able to compare it against the new source. +// +// There isn't a definitive rule for when noop updates aren't acceptable. +// It's a combination of lots of factors like how frequently your data source +// sends updates that are actually noops and how many queries per second +// Elasticsearch runs on the shard receiving the updates. 
+// +// **Versioning** +// +// Each indexed document is given a version number. +// By default, internal versioning is used that starts at 1 and increments with +// each update, deletes included. +// Optionally, the version number can be set to an external value (for example, +// if maintained in a database). +// To enable this functionality, `version_type` should be set to `external`. +// The value provided must be a numeric, long value greater than or equal to 0, +// and less than around `9.2e+18`. +// +// NOTE: Versioning is completely real time, and is not affected by the near +// real time aspects of search operations. +// If no version is provided, the operation runs without any version checks. +// +// When using the external version type, the system checks to see if the version +// number passed to the index request is greater than the version of the +// currently stored document. +// If true, the document will be indexed and the new version number used. +// If the value provided is less than or equal to the stored document's version +// number, a version conflict will occur and the index operation will fail. For +// example: +// +// ``` +// PUT my-index-000001/_doc/1?version=2&version_type=external +// +// { +// "user": { +// "id": "elkbee" +// } +// } +// +// In this example, the operation will succeed since the supplied version of 2 +// is higher than the current document version of 1. +// If the document was already updated and its version was set to 2 or higher, +// the indexing command will fail and result in a conflict (409 HTTP status +// code). +// +// A nice side effect is that there is no need to maintain strict ordering of +// async indexing operations run as a result of changes to a source database, as +// long as version numbers from the source database are used. 
+// Even the simple case of updating the Elasticsearch index using data from a
+// database is simplified if external versioning is used, as only the latest
+// version will be used if the index operations arrive out of order.
 package index
 
 import (
@@ -91,12 +285,206 @@ func NewIndexFunc(tp elastictransport.Interface) NewIndex {
 	}
 }
 
-// Index a document.
-// Adds a JSON document to the specified data stream or index and makes it
+// Create or update a document in an index.
+//
+// Add a JSON document to the specified data stream or index and make it
 // searchable.
 // If the target is an index and the document already exists, the request
 // updates the document and increments its version.
 //
+// NOTE: You cannot use this API to send update requests for existing documents
+// in a data stream.
+//
+// If the Elasticsearch security features are enabled, you must have the
+// following index privileges for the target data stream, index, or index alias:
+//
+// * To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request
+// format, you must have the `create`, `index`, or `write` index privilege.
+// * To add a document using the `POST /<target>/_doc/` request format, you must
+// have the `create_doc`, `create`, `index`, or `write` index privilege.
+// * To automatically create a data stream or index with this API request, you
+// must have the `auto_configure`, `create_index`, or `manage` index privilege.
+//
+// Automatic data stream creation requires a matching index template with data
+// stream enabled.
+//
+// NOTE: Replica shards might not all be started when an indexing operation
+// returns successfully.
+// By default, only the primary is required. Set `wait_for_active_shards` to
+// change this default behavior.
+//
+// **Automatically create data streams and indices**
+//
+// If the request's target doesn't exist and matches an index template with a
+// `data_stream` definition, the index operation automatically creates the data
+// stream.
+// +// If the target doesn't exist and doesn't match a data stream template, the +// operation automatically creates the index and applies any matching index +// templates. +// +// NOTE: Elasticsearch includes several built-in index templates. To avoid +// naming collisions with these templates, refer to index pattern documentation. +// +// If no mapping exists, the index operation creates a dynamic mapping. +// By default, new fields and objects are automatically added to the mapping if +// needed. +// +// Automatic index creation is controlled by the `action.auto_create_index` +// setting. +// If it is `true`, any index can be created automatically. +// You can modify this setting to explicitly allow or block automatic creation +// of indices that match specified patterns or set it to `false` to turn off +// automatic index creation entirely. +// Specify a comma-separated list of patterns you want to allow or prefix each +// pattern with `+` or `-` to indicate whether it should be allowed or blocked. +// When a list is specified, the default behaviour is to disallow. +// +// NOTE: The `action.auto_create_index` setting affects the automatic creation +// of indices only. +// It does not affect the creation of data streams. +// +// **Optimistic concurrency control** +// +// Index operations can be made conditional and only be performed if the last +// modification to the document was assigned the sequence number and primary +// term specified by the `if_seq_no` and `if_primary_term` parameters. +// If a mismatch is detected, the operation will result in a +// `VersionConflictException` and a status code of `409`. +// +// **Routing** +// +// By default, shard placement — or routing — is controlled by using a hash of +// the document's ID value. +// For more explicit control, the value fed into the hash function used by the +// router can be directly specified on a per-operation basis using the `routing` +// parameter. 
+// +// When setting up explicit mapping, you can also use the `_routing` field to +// direct the index operation to extract the routing value from the document +// itself. +// This does come at the (very minimal) cost of an additional document parsing +// pass. +// If the `_routing` mapping is defined and set to be required, the index +// operation will fail if no routing value is provided or extracted. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Distributed** +// +// The index operation is directed to the primary shard based on its route and +// performed on the actual node containing this shard. +// After the primary shard completes the operation, if needed, the update is +// distributed to applicable replicas. +// +// **Active shards** +// +// To improve the resiliency of writes to the system, indexing operations can be +// configured to wait for a certain number of active shard copies before +// proceeding with the operation. +// If the requisite number of active shard copies are not available, then the +// write operation must wait and retry, until either the requisite shard copies +// have started or a timeout occurs. +// By default, write operations only wait for the primary shards to be active +// before proceeding (that is to say `wait_for_active_shards` is `1`). +// This default can be overridden in the index settings dynamically by setting +// `index.write.wait_for_active_shards`. +// To alter this behavior per operation, use the `wait_for_active_shards +// request` parameter. +// +// Valid values are all or any positive integer up to the total number of +// configured copies per shard in the index (which is `number_of_replicas`+1). +// Specifying a negative value or a number greater than the number of shard +// copies will throw an error. 
+// +// For example, suppose you have a cluster of three nodes, A, B, and C and you +// create an index index with the number of replicas set to 3 (resulting in 4 +// shard copies, one more copy than there are nodes). +// If you attempt an indexing operation, by default the operation will only +// ensure the primary copy of each shard is available before proceeding. +// This means that even if B and C went down and A hosted the primary shard +// copies, the indexing operation would still proceed with only one copy of the +// data. +// If `wait_for_active_shards` is set on the request to `3` (and all three nodes +// are up), the indexing operation will require 3 active shard copies before +// proceeding. +// This requirement should be met because there are 3 active nodes in the +// cluster, each one holding a copy of the shard. +// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is +// the same in this situation), the indexing operation will not proceed as you +// do not have all 4 copies of each shard active in the index. +// The operation will timeout unless a new node is brought up in the cluster to +// host the fourth copy of the shard. +// +// It is important to note that this setting greatly reduces the chances of the +// write operation not writing to the requisite number of shard copies, but it +// does not completely eliminate the possibility, because this check occurs +// before the write operation starts. +// After the write operation is underway, it is still possible for replication +// to fail on any number of shard copies but still succeed on the primary. +// The `_shards` section of the API response reveals the number of shard copies +// on which replication succeeded and failed. +// +// **No operation (noop) updates** +// +// When updating a document by using this API, a new version of the document is +// always created even if the document hasn't changed. 
+// If this isn't acceptable use the `_update` API with `detect_noop` set to +// `true`. +// The `detect_noop` option isn't available on this API because it doesn’t fetch +// the old source and isn't able to compare it against the new source. +// +// There isn't a definitive rule for when noop updates aren't acceptable. +// It's a combination of lots of factors like how frequently your data source +// sends updates that are actually noops and how many queries per second +// Elasticsearch runs on the shard receiving the updates. +// +// **Versioning** +// +// Each indexed document is given a version number. +// By default, internal versioning is used that starts at 1 and increments with +// each update, deletes included. +// Optionally, the version number can be set to an external value (for example, +// if maintained in a database). +// To enable this functionality, `version_type` should be set to `external`. +// The value provided must be a numeric, long value greater than or equal to 0, +// and less than around `9.2e+18`. +// +// NOTE: Versioning is completely real time, and is not affected by the near +// real time aspects of search operations. +// If no version is provided, the operation runs without any version checks. +// +// When using the external version type, the system checks to see if the version +// number passed to the index request is greater than the version of the +// currently stored document. +// If true, the document will be indexed and the new version number used. +// If the value provided is less than or equal to the stored document's version +// number, a version conflict will occur and the index operation will fail. For +// example: +// +// ``` +// PUT my-index-000001/_doc/1?version=2&version_type=external +// +// { +// "user": { +// "id": "elkbee" +// } +// } +// +// In this example, the operation will succeed since the supplied version of 2 +// is higher than the current document version of 1. 
+// If the document was already updated and its version was set to 2 or higher, +// the indexing command will fail and result in a conflict (409 HTTP status +// code). +// +// A nice side effect is that there is no need to maintain strict ordering of +// async indexing operations run as a result of changes to a source database, as +// long as version numbers from the source database are used. +// Even the simple case of updating the Elasticsearch index using data from a +// database is simplified if external versioning is used, as only the latest +// version will be used if the index operations arrive out of order. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html func New(tp elastictransport.Interface) *Index { r := &Index{ @@ -105,8 +493,6 @@ func New(tp elastictransport.Interface) *Index { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -342,7 +728,9 @@ func (r *Index) Header(key, value string) *Index { return r } -// Id Unique identifier for the document. +// Id A unique identifier for the document. +// To automatically generate a document ID, use the `POST //_doc/` +// request format and omit this parameter. // API Name: id func (r *Index) Id(id string) *Index { r.paramSet |= idMask @@ -351,7 +739,13 @@ func (r *Index) Id(id string) *Index { return r } -// Index Name of the data stream or index to target. +// Index The name of the data stream or index to target. +// If the target doesn't exist and matches the name or wildcard (`*`) pattern of +// an index template with a `data_stream` definition, this request creates the +// data stream. +// If the target doesn't exist and doesn't match a data stream template, this +// request creates the index. +// You can check for existing targets with the resolve index API. 
 // API Name: index
 func (r *Index) _index(index string) *Index {
 	r.paramSet |= indexMask
@@ -376,14 +770,23 @@ func (r *Index) IfSeqNo(sequencenumber string) *Index {
 	return r
 }
 
-// OpType Set to create to only index the document if it does not already exist (put if
-// absent).
+// IncludeSourceOnError True or false if to include the document source in the error message in case
+// of parsing errors.
+// API name: include_source_on_error
+func (r *Index) IncludeSourceOnError(includesourceonerror bool) *Index {
+	r.values.Set("include_source_on_error", strconv.FormatBool(includesourceonerror))
+
+	return r
+}
+
+// OpType Set to `create` to only index the document if it does not already exist (put
+// if absent).
 // If a document with the specified `_id` already exists, the indexing operation
 // will fail.
-// Same as using the `/_create` endpoint.
-// Valid values: `index`, `create`.
-// If document id is specified, it defaults to `index`.
+// The behavior is the same as using the `/_create` endpoint.
+// If a document ID is specified, this parameter defaults to `index`.
 // Otherwise, it defaults to `create`.
+// If the request targets a data stream, an `op_type` of `create` is required.
 // API name: op_type
 func (r *Index) OpType(optype optype.OpType) *Index {
 	r.values.Set("op_type", optype.String())
@@ -391,7 +794,7 @@ func (r *Index) OpType(optype optype.OpType) *Index {
 	return r
 }
 
-// Pipeline ID of the pipeline to use to preprocess incoming documents.
+// Pipeline The ID of the pipeline to use to preprocess incoming documents.
 // If the index has a default ingest pipeline specified, then setting the value
 // to `_none` disables the default ingest pipeline for this request.
// If a final pipeline is configured it will always run, regardless of the value @@ -404,9 +807,10 @@ func (r *Index) Pipeline(pipeline string) *Index { } // Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation -// visible to search, if `wait_for` then wait for a refresh to make this -// operation visible to search, if `false` do nothing with refreshes. -// Valid values: `true`, `false`, `wait_for`. +// visible to search. +// If `wait_for`, it waits for a refresh to make this operation visible to +// search. +// If `false`, it does nothing with refreshes. // API name: refresh func (r *Index) Refresh(refresh refresh.Refresh) *Index { r.values.Set("refresh", refresh.String()) @@ -414,7 +818,7 @@ func (r *Index) Refresh(refresh refresh.Refresh) *Index { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value that is used to route operations to a specific shard. // API name: routing func (r *Index) Routing(routing string) *Index { r.values.Set("routing", routing) @@ -422,8 +826,16 @@ func (r *Index) Routing(routing string) *Index { return r } -// Timeout Period the request waits for the following operations: automatic index +// Timeout The period the request waits for the following operations: automatic index // creation, dynamic mapping updates, waiting for active shards. +// +// This parameter is useful for situations where the primary shard assigned to +// perform the operation might not be available when the operation runs. +// Some reasons for this might be that the primary shard is currently recovering +// from a gateway or undergoing relocation. +// By default, the operation will wait on the primary shard to become available +// for at least 1 minute before failing and responding with an error. +// The actual wait time could be longer, particularly when multiple waits occur. 
// API name: timeout func (r *Index) Timeout(duration string) *Index { r.values.Set("timeout", duration) @@ -431,9 +843,8 @@ func (r *Index) Timeout(duration string) *Index { return r } -// Version Explicit version number for concurrency control. -// The specified version must match the current version of the document for the -// request to succeed. +// Version An explicit version number for concurrency control. +// It must be a non-negative long number. // API name: version func (r *Index) Version(versionnumber string) *Index { r.values.Set("version", versionnumber) @@ -441,7 +852,7 @@ func (r *Index) Version(versionnumber string) *Index { return r } -// VersionType Specific version type: `external`, `external_gte`. +// VersionType The version type. // API name: version_type func (r *Index) VersionType(versiontype versiontype.VersionType) *Index { r.values.Set("version_type", versiontype.String()) @@ -451,8 +862,9 @@ func (r *Index) VersionType(versiontype versiontype.VersionType) *Index { // WaitForActiveShards The number of shard copies that must be active before proceeding with the // operation. -// Set to all or any positive integer up to the total number of shards in the -// index (`number_of_replicas+1`). +// You can set it to `all` or any positive integer up to the total number of +// shards in the index (`number_of_replicas+1`). +// The default value of `1` means it waits for each primary shard to be active. // API name: wait_for_active_shards func (r *Index) WaitForActiveShards(waitforactiveshards string) *Index { r.values.Set("wait_for_active_shards", waitforactiveshards) diff --git a/typedapi/core/index/request.go b/typedapi/core/index/request.go index 70010f8acb..6f429b3f0c 100644 --- a/typedapi/core/index/request.go +++ b/typedapi/core/index/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package index @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package index // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/index/IndexRequest.ts#L35-L118 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/index/IndexRequest.ts#L35-L268 type Request = json.RawMessage // NewRequest returns a Request diff --git a/typedapi/core/index/response.go b/typedapi/core/index/response.go index 8e9fd2fbe3..da44d72520 100644 --- a/typedapi/core/index/response.go +++ b/typedapi/core/index/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package index @@ -27,16 +27,25 @@ import ( // Response holds the response body struct for the package index // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/index/IndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/index/IndexResponse.ts#L22-L24 type Response struct { - ForcedRefresh *bool `json:"forced_refresh,omitempty"` - Id_ string `json:"_id"` - Index_ string `json:"_index"` - PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - Result result.Result `json:"result"` - SeqNo_ *int64 `json:"_seq_no,omitempty"` - Shards_ types.ShardStatistics `json:"_shards"` - Version_ int64 `json:"_version"` + 
ForcedRefresh *bool `json:"forced_refresh,omitempty"` + // Id_ The unique identifier for the added document. + Id_ string `json:"_id"` + // Index_ The name of the index the document was added to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result The result of the indexing operation: `created` or `updated`. + Result result.Result `json:"result"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Information about the replication process of the operation. + Shards_ types.ShardStatistics `json:"_shards"` + // Version_ The document version, which is incremented each time the document is updated. + Version_ int64 `json:"_version"` } // NewResponse returns a Response diff --git a/typedapi/core/info/info.go b/typedapi/core/info/info.go index f5afe98c69..733b1436a6 100644 --- a/typedapi/core/info/info.go +++ b/typedapi/core/info/info.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get cluster info. -// Returns basic information about the cluster. +// Get basic build, version, and cluster information. package info import ( @@ -70,9 +70,9 @@ func NewInfoFunc(tp elastictransport.Interface) NewInfo { } // Get cluster info. -// Returns basic information about the cluster. +// Get basic build, version, and cluster information. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/rest-api-root.html func New(tp elastictransport.Interface) *Info { r := &Info{ transport: tp, diff --git a/typedapi/core/info/response.go b/typedapi/core/info/response.go index 097c1abc4a..a70b79e352 100644 --- a/typedapi/core/info/response.go +++ b/typedapi/core/info/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package info @@ -26,13 +26,17 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/info/RootNodeInfoResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/info/RootNodeInfoResponse.ts#L23-L40 type Response struct { - ClusterName string `json:"cluster_name"` - ClusterUuid string `json:"cluster_uuid"` - Name string `json:"name"` - Tagline string `json:"tagline"` - Version types.ElasticsearchVersionInfo `json:"version"` + + // ClusterName The responding cluster's name. + ClusterName string `json:"cluster_name"` + ClusterUuid string `json:"cluster_uuid"` + // Name The responding node's name. + Name string `json:"name"` + Tagline string `json:"tagline"` + // Version The running version of Elasticsearch. 
+ Version types.ElasticsearchVersionInfo `json:"version"` } // NewResponse returns a Response diff --git a/typedapi/core/knnsearch/knn_search.go b/typedapi/core/knnsearch/knn_search.go index bb88cac106..81a1b82162 100644 --- a/typedapi/core/knnsearch/knn_search.go +++ b/typedapi/core/knnsearch/knn_search.go @@ -16,9 +16,34 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Performs a kNN search. +// Run a knn search. +// +// NOTE: The kNN search API has been replaced by the `knn` option in the search +// API. +// +// Perform a k-nearest neighbor (kNN) search on a dense_vector field and return +// the matching documents. +// Given a query vector, the API finds the k closest vectors and returns those +// documents as search hits. +// +// Elasticsearch uses the HNSW algorithm to support efficient kNN search. +// Like most kNN algorithms, HNSW is an approximate method that sacrifices +// result accuracy for improved search speed. +// This means the results returned are not always the true k closest neighbors. +// +// The kNN search API supports restricting the search using a filter. +// The search will return the top k documents that also match the filter query. +// +// A kNN search response has the exact same structure as a search API response. +// However, certain sections have a meaning specific to kNN search: +// +// * The document `_score` is determined by the similarity between the query and +// document vector. +// * The `hits.total` object contains the total number of nearest neighbor +// candidates considered, which is `num_candidates * num_shards`. The +// `hits.total.relation` will always be `eq`, indicating an exact value. 
package knnsearch import ( @@ -81,9 +106,34 @@ func NewKnnSearchFunc(tp elastictransport.Interface) NewKnnSearch { } } -// Performs a kNN search. +// Run a knn search. +// +// NOTE: The kNN search API has been replaced by the `knn` option in the search +// API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html +// Perform a k-nearest neighbor (kNN) search on a dense_vector field and return +// the matching documents. +// Given a query vector, the API finds the k closest vectors and returns those +// documents as search hits. +// +// Elasticsearch uses the HNSW algorithm to support efficient kNN search. +// Like most kNN algorithms, HNSW is an approximate method that sacrifices +// result accuracy for improved search speed. +// This means the results returned are not always the true k closest neighbors. +// +// The kNN search API supports restricting the search using a filter. +// The search will return the top k documents that also match the filter query. +// +// A kNN search response has the exact same structure as a search API response. +// However, certain sections have a meaning specific to kNN search: +// +// * The document `_score` is determined by the similarity between the query and +// document vector. +// * The `hits.total` object contains the total number of nearest neighbor +// candidates considered, which is `num_candidates * num_shards`. The +// `hits.total.relation` will always be `eq`, indicating an exact value. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search-api.html func New(tp elastictransport.Interface) *KnnSearch { r := &KnnSearch{ transport: tp, @@ -91,8 +141,6 @@ func New(tp elastictransport.Interface) *KnnSearch { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -305,7 +353,7 @@ func (r *KnnSearch) Header(key, value string) *KnnSearch { } // Index A comma-separated list of index names to search; -// use `_all` or to perform the operation on all indices +// use `_all` or to perform the operation on all indices. // API Name: index func (r *KnnSearch) _index(index string) *KnnSearch { r.paramSet |= indexMask @@ -314,7 +362,7 @@ func (r *KnnSearch) _index(index string) *KnnSearch { return r } -// Routing A comma-separated list of specific routing values +// Routing A comma-separated list of specific routing values. // API name: routing func (r *KnnSearch) Routing(routing string) *KnnSearch { r.values.Set("routing", routing) @@ -366,63 +414,99 @@ func (r *KnnSearch) Pretty(pretty bool) *KnnSearch { return r } -// DocvalueFields The request returns doc values for field names matching these patterns -// in the hits.fields property of the response. Accepts wildcard (*) patterns. +// The request returns doc values for field names matching these patterns +// in the `hits.fields` property of the response. +// It accepts wildcard (`*`) patterns. 
// API name: docvalue_fields -func (r *KnnSearch) DocvalueFields(docvaluefields ...types.FieldAndFormat) *KnnSearch { - r.req.DocvalueFields = docvaluefields +func (r *KnnSearch) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *KnnSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docvaluefields { + + r.req.DocvalueFields = append(r.req.DocvalueFields, *v.FieldAndFormatCaster()) + } return r } -// Fields The request returns values for field names matching these patterns -// in the hits.fields property of the response. Accepts wildcard (*) patterns. +// The request returns values for field names matching these patterns +// in the `hits.fields` property of the response. +// It accepts wildcard (`*`) patterns. // API name: fields func (r *KnnSearch) Fields(fields ...string) *KnnSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Fields = fields return r } -// Filter Query to filter the documents that can match. The kNN search will return the -// top +// A query to filter the documents that can match. The kNN search will return +// the top // `k` documents that also match this filter. The value can be a single query or // a // list of queries. If `filter` isn't provided, all documents are allowed to // match. // API name: filter -func (r *KnnSearch) Filter(filters ...types.Query) *KnnSearch { - r.req.Filter = filters +func (r *KnnSearch) Filter(filters ...types.QueryVariant) *KnnSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Filter = make([]types.Query, len(filters)) + for i, v := range filters { + r.req.Filter[i] = *v.QueryCaster() + } return r } -// Knn kNN query to execute +// The kNN query to run. 
// API name: knn -func (r *KnnSearch) Knn(knn *types.CoreKnnQuery) *KnnSearch { +func (r *KnnSearch) Knn(knn types.CoreKnnQueryVariant) *KnnSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Knn = *knn + r.req.Knn = *knn.CoreKnnQueryCaster() return r } -// Source_ Indicates which source fields are returned for matching documents. These -// fields are returned in the hits._source property of the search response. +// Indicates which source fields are returned for matching documents. These +// fields are returned in the `hits._source` property of the search response. // API name: _source -func (r *KnnSearch) Source_(sourceconfig types.SourceConfig) *KnnSearch { - r.req.Source_ = sourceconfig +func (r *KnnSearch) Source_(sourceconfig types.SourceConfigVariant) *KnnSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source_ = *sourceconfig.SourceConfigCaster() return r } -// StoredFields List of stored fields to return as part of a hit. If no fields are specified, +// A list of stored fields to return as part of a hit. If no fields are +// specified, // no stored fields are included in the response. If this field is specified, -// the _source -// parameter defaults to false. You can pass _source: true to return both source -// fields +// the `_source` +// parameter defaults to `false`. You can pass `_source: true` to return both +// source fields // and stored fields in the search response. 
// API name: stored_fields func (r *KnnSearch) StoredFields(fields ...string) *KnnSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.StoredFields = fields return r diff --git a/typedapi/core/knnsearch/request.go b/typedapi/core/knnsearch/request.go index 781de09bf5..2773595c40 100644 --- a/typedapi/core/knnsearch/request.go +++ b/typedapi/core/knnsearch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package knnsearch @@ -32,32 +32,35 @@ import ( // Request holds the request body struct for the package knnsearch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/knn_search/KnnSearchRequest.ts#L27-L80 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/knn_search/KnnSearchRequest.ts#L26-L112 type Request struct { // DocvalueFields The request returns doc values for field names matching these patterns - // in the hits.fields property of the response. Accepts wildcard (*) patterns. + // in the `hits.fields` property of the response. + // It accepts wildcard (`*`) patterns. DocvalueFields []types.FieldAndFormat `json:"docvalue_fields,omitempty"` // Fields The request returns values for field names matching these patterns - // in the hits.fields property of the response. Accepts wildcard (*) patterns. + // in the `hits.fields` property of the response. + // It accepts wildcard (`*`) patterns. Fields []string `json:"fields,omitempty"` - // Filter Query to filter the documents that can match. 
The kNN search will return the - // top + // Filter A query to filter the documents that can match. The kNN search will return + // the top // `k` documents that also match this filter. The value can be a single query or // a // list of queries. If `filter` isn't provided, all documents are allowed to // match. Filter []types.Query `json:"filter,omitempty"` - // Knn kNN query to execute + // Knn The kNN query to run. Knn types.CoreKnnQuery `json:"knn"` // Source_ Indicates which source fields are returned for matching documents. These - // fields are returned in the hits._source property of the search response. + // fields are returned in the `hits._source` property of the search response. Source_ types.SourceConfig `json:"_source,omitempty"` - // StoredFields List of stored fields to return as part of a hit. If no fields are specified, + // StoredFields A list of stored fields to return as part of a hit. If no fields are + // specified, // no stored fields are included in the response. If this field is specified, - // the _source - // parameter defaults to false. You can pass _source: true to return both source - // fields + // the `_source` + // parameter defaults to `false`. You can pass `_source: true` to return both + // source fields // and stored fields in the search response. StoredFields []string `json:"stored_fields,omitempty"` } diff --git a/typedapi/core/knnsearch/response.go b/typedapi/core/knnsearch/response.go index a88f661fef..9c5d14cddb 100644 --- a/typedapi/core/knnsearch/response.go +++ b/typedapi/core/knnsearch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package knnsearch @@ -28,23 +28,23 @@ import ( // Response holds the response body struct for the package knnsearch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/knn_search/KnnSearchResponse.ts#L26-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/knn_search/KnnSearchResponse.ts#L26-L54 type Response struct { - // Fields Contains field values for the documents. These fields + // Fields The field values for the documents. These fields // must be specified in the request using the `fields` parameter. Fields map[string]json.RawMessage `json:"fields,omitempty"` - // Hits Contains returned documents and metadata. + // Hits The returned documents and metadata. Hits types.HitsMetadata `json:"hits"` - // MaxScore Highest returned document score. This value is null for requests + // MaxScore The highest returned document score. This value is null for requests // that do not sort by score. MaxScore *types.Float64 `json:"max_score,omitempty"` - // Shards_ Contains a count of shards used for the request. + // Shards_ A count of shards used for the request. Shards_ types.ShardStatistics `json:"_shards"` // TimedOut If true, the request timed out before completion; // returned results may be partial or empty. TimedOut bool `json:"timed_out"` - // Took Milliseconds it took Elasticsearch to execute the request. + // Took The milliseconds it took Elasticsearch to run the request. Took int64 `json:"took"` } diff --git a/typedapi/core/mget/mget.go b/typedapi/core/mget/mget.go index 411cd58414..6a7a93c47c 100644 --- a/typedapi/core/mget/mget.go +++ b/typedapi/core/mget/mget.go @@ -16,9 +16,32 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Allows to get multiple documents in one request. +// Get multiple documents. +// +// Get multiple JSON documents by ID from one or more indices. +// If you specify an index in the request URI, you only need to specify the +// document IDs in the request body. +// To ensure fast responses, this multi get (mget) API responds with partial +// results if one or more shards fail. +// +// **Filter source fields** +// +// By default, the `_source` field is returned for every document (if stored). +// Use the `_source` and `_source_include` or `source_exclude` attributes to +// filter what fields are returned for a particular document. +// You can include the `_source`, `_source_includes`, and `_source_excludes` +// query parameters in the request URI to specify the defaults to use when there +// are no per-document instructions. +// +// **Get stored fields** +// +// Use the `stored_fields` attribute to specify the set of stored fields you +// want to retrieve. +// Any requested fields that are not stored are ignored. +// You can include the `stored_fields` query parameter in the request URI to +// specify the defaults to use when there are no per-document instructions. package mget import ( @@ -79,7 +102,30 @@ func NewMgetFunc(tp elastictransport.Interface) NewMget { } } -// Allows to get multiple documents in one request. +// Get multiple documents. +// +// Get multiple JSON documents by ID from one or more indices. +// If you specify an index in the request URI, you only need to specify the +// document IDs in the request body. +// To ensure fast responses, this multi get (mget) API responds with partial +// results if one or more shards fail. 
+// +// **Filter source fields** +// +// By default, the `_source` field is returned for every document (if stored). +// Use the `_source` and `_source_include` or `source_exclude` attributes to +// filter what fields are returned for a particular document. +// You can include the `_source`, `_source_includes`, and `_source_excludes` +// query parameters in the request URI to specify the defaults to use when there +// are no per-document instructions. +// +// **Get stored fields** +// +// Use the `stored_fields` attribute to specify the set of stored fields you +// want to retrieve. +// Any requested fields that are not stored are ignored. +// You can include the `stored_fields` query parameter in the request URI to +// specify the defaults to use when there are no per-document instructions. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html func New(tp elastictransport.Interface) *Mget { @@ -89,8 +135,6 @@ func New(tp elastictransport.Interface) *Mget { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -446,19 +490,31 @@ func (r *Mget) Pretty(pretty bool) *Mget { return r } -// Docs The documents you want to retrieve. Required if no index is specified in the +// The documents you want to retrieve. Required if no index is specified in the // request URI. // API name: docs -func (r *Mget) Docs(docs ...types.MgetOperation) *Mget { - r.req.Docs = docs +func (r *Mget) Docs(docs ...types.MgetOperationVariant) *Mget { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docs { + r.req.Docs = append(r.req.Docs, *v.MgetOperationCaster()) + + } return r } -// Ids The IDs of the documents you want to retrieve. Allowed when the index is +// The IDs of the documents you want to retrieve. Allowed when the index is // specified in the request URI. 
// API name: ids func (r *Mget) Ids(ids ...string) *Mget { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Ids = ids return r diff --git a/typedapi/core/mget/request.go b/typedapi/core/mget/request.go index 77ec80701b..020b473cbe 100644 --- a/typedapi/core/mget/request.go +++ b/typedapi/core/mget/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package mget @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package mget // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/mget/MultiGetRequest.ts#L25-L98 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/mget/MultiGetRequest.ts#L25-L127 type Request struct { // Docs The documents you want to retrieve. Required if no index is specified in the diff --git a/typedapi/core/mget/response.go b/typedapi/core/mget/response.go index a7a9720336..b5afbe1d77 100644 --- a/typedapi/core/mget/response.go +++ b/typedapi/core/mget/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package mget @@ -32,8 +32,15 @@ import ( // Response holds the response body struct for the package mget // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/mget/MultiGetResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/mget/MultiGetResponse.ts#L22-L31 type Response struct { + + // Docs The response includes a docs array that contains the documents in the order + // specified in the request. + // The structure of the returned documents is similar to that returned by the + // get API. + // If there is a failure getting a particular document, the error is included in + // place of the document. Docs []types.MgetResponseItem `json:"docs"` } @@ -76,7 +83,7 @@ func (s *Response) UnmarshalJSON(data []byte) error { switch t { - case "fields", "found", "_primary_term", "_routing", "_seq_no", "_source", "_version": + case "fields", "found", "_ignored", "_primary_term", "_routing", "_seq_no", "_source", "_version": o := types.NewGetResult() localDec := json.NewDecoder(bytes.NewReader(message)) if err := localDec.Decode(&o); err != nil { diff --git a/typedapi/core/msearch/msearch.go b/typedapi/core/msearch/msearch.go index 7b6fbcaab2..6b4ffe6171 100644 --- a/typedapi/core/msearch/msearch.go +++ b/typedapi/core/msearch/msearch.go @@ -16,9 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Allows to execute several search operations in one request. +// Run multiple searches. +// +// The format of the request is similar to the bulk API format and makes use of +// the newline delimited JSON (NDJSON) format. +// The structure is as follows: +// +// ``` +// header\n +// body\n +// header\n +// body\n +// ``` +// +// This structure is specifically optimized to reduce parsing if a specific +// search ends up redirected to another node. +// +// IMPORTANT: The final line of data must end with a newline character `\n`. +// Each newline character may be preceded by a carriage return `\r`. +// When sending requests to this endpoint the `Content-Type` header should be +// set to `application/x-ndjson`. package msearch import ( @@ -81,7 +100,26 @@ func NewMsearchFunc(tp elastictransport.Interface) NewMsearch { } } -// Allows to execute several search operations in one request. +// Run multiple searches. +// +// The format of the request is similar to the bulk API format and makes use of +// the newline delimited JSON (NDJSON) format. +// The structure is as follows: +// +// ``` +// header\n +// body\n +// header\n +// body\n +// ``` +// +// This structure is specifically optimized to reduce parsing if a specific +// search ends up redirected to another node. +// +// IMPORTANT: The final line of data must end with a newline character `\n`. +// Each newline character may be preceded by a carriage return `\r`. +// When sending requests to this endpoint the `Content-Type` header should be +// set to `application/x-ndjson`. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html func New(tp elastictransport.Interface) *Msearch { diff --git a/typedapi/core/msearch/request.go b/typedapi/core/msearch/request.go index c56d8913e1..3cc6a3c896 100644 --- a/typedapi/core/msearch/request.go +++ b/typedapi/core/msearch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package msearch @@ -26,5 +26,5 @@ import ( // Request holds the request body struct for the package msearch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/msearch/MultiSearchRequest.ts#L25-L106 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/msearch/MultiSearchRequest.ts#L25-L135 type Request = []types.MsearchRequestItem diff --git a/typedapi/core/msearch/response.go b/typedapi/core/msearch/response.go index eb0bf6fe1f..b170ed7ae6 100644 --- a/typedapi/core/msearch/response.go +++ b/typedapi/core/msearch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package msearch @@ -33,7 +33,7 @@ import ( // Response holds the response body struct for the package msearch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/msearch/MultiSearchResponse.ts#L25-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/msearch/MultiSearchResponse.ts#L25-L27 type Response struct { Responses []types.MsearchResponseItem `json:"responses"` Took int64 `json:"took"` diff --git a/typedapi/core/msearchtemplate/msearch_template.go b/typedapi/core/msearchtemplate/msearch_template.go index 8f5ac6f4cb..b44b601744 100644 --- a/typedapi/core/msearchtemplate/msearch_template.go +++ b/typedapi/core/msearchtemplate/msearch_template.go @@ -16,9 +16,26 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Runs multiple templated searches with a single request. +// Run multiple templated searches. +// +// Run multiple templated searches with a single request. +// If you are providing a text file or text input to `curl`, use the +// `--data-binary` flag instead of `-d` to preserve newlines. 
+// For example: +// +// ``` +// $ cat requests +// { "index": "my-index" } +// { "id": "my-search-template", "params": { "query_string": "hello world", +// "from": 0, "size": 10 }} +// { "index": "my-other-index" } +// { "id": "my-other-search-template", "params": { "query_type": "match_all" }} +// +// $ curl -H "Content-Type: application/x-ndjson" -XGET +// localhost:9200/_msearch/template --data-binary "@requests"; echo +// ``` package msearchtemplate import ( @@ -80,9 +97,26 @@ func NewMsearchTemplateFunc(tp elastictransport.Interface) NewMsearchTemplate { } } -// Runs multiple templated searches with a single request. +// Run multiple templated searches. +// +// Run multiple templated searches with a single request. +// If you are providing a text file or text input to `curl`, use the +// `--data-binary` flag instead of `-d` to preserve newlines. +// For example: +// +// ``` +// $ cat requests +// { "index": "my-index" } +// { "id": "my-search-template", "params": { "query_string": "hello world", +// "from": 0, "size": 10 }} +// { "index": "my-other-index" } +// { "id": "my-other-search-template", "params": { "query_type": "match_all" }} +// +// $ curl -H "Content-Type: application/x-ndjson" -XGET +// localhost:9200/_msearch/template --data-binary "@requests"; echo +// ``` // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-search-template.html func New(tp elastictransport.Interface) *MsearchTemplate { r := &MsearchTemplate{ transport: tp, @@ -317,8 +351,8 @@ func (r *MsearchTemplate) Header(key, value string) *MsearchTemplate { return r } -// Index Comma-separated list of data streams, indices, and aliases to search. -// Supports wildcards (`*`). +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). // To search all data streams and indices, omit this parameter or use `*`. 
// API Name: index func (r *MsearchTemplate) Index(index string) *MsearchTemplate { @@ -337,7 +371,7 @@ func (r *MsearchTemplate) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Mse return r } -// MaxConcurrentSearches Maximum number of concurrent searches the API can run. +// MaxConcurrentSearches The maximum number of concurrent searches the API can run. // API name: max_concurrent_searches func (r *MsearchTemplate) MaxConcurrentSearches(maxconcurrentsearches string) *MsearchTemplate { r.values.Set("max_concurrent_searches", maxconcurrentsearches) @@ -346,7 +380,6 @@ func (r *MsearchTemplate) MaxConcurrentSearches(maxconcurrentsearches string) *M } // SearchType The type of the search operation. -// Available options: `query_then_fetch`, `dfs_query_then_fetch`. // API name: search_type func (r *MsearchTemplate) SearchType(searchtype searchtype.SearchType) *MsearchTemplate { r.values.Set("search_type", searchtype.String()) diff --git a/typedapi/core/msearchtemplate/request.go b/typedapi/core/msearchtemplate/request.go index 1e4c89edc0..187a0fe4ee 100644 --- a/typedapi/core/msearchtemplate/request.go +++ b/typedapi/core/msearchtemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package msearchtemplate @@ -26,5 +26,5 @@ import ( // Request holds the request body struct for the package msearchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/msearch_template/MultiSearchTemplateRequest.ts#L25-L70 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/msearch_template/MultiSearchTemplateRequest.ts#L25-L116 type Request = []types.RequestItem diff --git a/typedapi/core/msearchtemplate/response.go b/typedapi/core/msearchtemplate/response.go index efd210a1a2..671098e682 100644 --- a/typedapi/core/msearchtemplate/response.go +++ b/typedapi/core/msearchtemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package msearchtemplate @@ -33,7 +33,7 @@ import ( // Response holds the response body struct for the package msearchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/msearch_template/MultiSearchTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/msearch_template/MultiSearchTemplateResponse.ts#L22-L31 type Response struct { Responses []types.MsearchResponseItem `json:"responses"` Took int64 `json:"took"` diff --git a/typedapi/core/mtermvectors/mtermvectors.go b/typedapi/core/mtermvectors/mtermvectors.go index 7892aa4a64..8fe1f622fb 100644 --- a/typedapi/core/mtermvectors/mtermvectors.go +++ b/typedapi/core/mtermvectors/mtermvectors.go @@ -16,9 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns multiple termvectors in one request. +// Get multiple term vectors. +// +// Get multiple term vectors with a single request. +// You can specify existing documents by index and ID or provide artificial +// documents in the body of the request. +// You can specify the index in the request body or request URI. +// The response contains a `docs` array with all the fetched termvectors. +// Each element has the structure provided by the termvectors API. +// +// **Artificial documents** +// +// You can also use `mtermvectors` to generate term vectors for artificial +// documents provided in the body of the request. 
+// The mapping used is determined by the specified `_index`. package mtermvectors import ( @@ -80,7 +93,20 @@ func NewMtermvectorsFunc(tp elastictransport.Interface) NewMtermvectors { } } -// Returns multiple termvectors in one request. +// Get multiple term vectors. +// +// Get multiple term vectors with a single request. +// You can specify existing documents by index and ID or provide artificial +// documents in the body of the request. +// You can specify the index in the request body or request URI. +// The response contains a `docs` array with all the fetched termvectors. +// Each element has the structure provided by the termvectors API. +// +// **Artificial documents** +// +// You can also use `mtermvectors` to generate term vectors for artificial +// documents provided in the body of the request. +// The mapping used is determined by the specified `_index`. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html func New(tp elastictransport.Interface) *Mtermvectors { @@ -90,8 +116,6 @@ func New(tp elastictransport.Interface) *Mtermvectors { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -308,7 +332,7 @@ func (r *Mtermvectors) Header(key, value string) *Mtermvectors { return r } -// Index Name of the index that contains the documents. +// Index The name of the index that contains the documents. // API Name: index func (r *Mtermvectors) Index(index string) *Mtermvectors { r.paramSet |= indexMask @@ -317,10 +341,10 @@ func (r *Mtermvectors) Index(index string) *Mtermvectors { return r } -// Fields Comma-separated list or wildcard expressions of fields to include in the +// Fields A comma-separated list or wildcard expressions of fields to include in the // statistics. -// Used as the default list unless a specific field list is provided in the -// `completion_fields` or `fielddata_fields` parameters. 
+// It is used as the default list unless a specific field list is provided in +// the `completion_fields` or `fielddata_fields` parameters. // API name: fields func (r *Mtermvectors) Fields(fields ...string) *Mtermvectors { r.values.Set("fields", strings.Join(fields, ",")) @@ -361,8 +385,8 @@ func (r *Mtermvectors) Positions(positions bool) *Mtermvectors { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// It is random by default. // API name: preference func (r *Mtermvectors) Preference(preference string) *Mtermvectors { r.values.Set("preference", preference) @@ -378,7 +402,7 @@ func (r *Mtermvectors) Realtime(realtime bool) *Mtermvectors { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *Mtermvectors) Routing(routing string) *Mtermvectors { r.values.Set("routing", routing) @@ -402,7 +426,7 @@ func (r *Mtermvectors) Version(versionnumber string) *Mtermvectors { return r } -// VersionType Specific version type. +// VersionType The version type. // API name: version_type func (r *Mtermvectors) VersionType(versiontype versiontype.VersionType) *Mtermvectors { r.values.Set("version_type", versiontype.String()) @@ -454,19 +478,33 @@ func (r *Mtermvectors) Pretty(pretty bool) *Mtermvectors { return r } -// Docs Array of existing or artificial documents. +// An array of existing or artificial documents. 
// API name: docs -func (r *Mtermvectors) Docs(docs ...types.MTermVectorsOperation) *Mtermvectors { - r.req.Docs = docs +func (r *Mtermvectors) Docs(docs ...types.MTermVectorsOperationVariant) *Mtermvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docs { + + r.req.Docs = append(r.req.Docs, *v.MTermVectorsOperationCaster()) + } return r } -// Ids Simplified syntax to specify documents by their ID if they're in the same +// A simplified syntax to specify documents by their ID if they're in the same // index. // API name: ids func (r *Mtermvectors) Ids(ids ...string) *Mtermvectors { - r.req.Ids = ids + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ids { + + r.req.Ids = append(r.req.Ids, v) + } return r } diff --git a/typedapi/core/mtermvectors/request.go b/typedapi/core/mtermvectors/request.go index 255bf46b36..c0e7b79fa3 100644 --- a/typedapi/core/mtermvectors/request.go +++ b/typedapi/core/mtermvectors/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package mtermvectors @@ -29,12 +29,12 @@ import ( // Request holds the request body struct for the package mtermvectors // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/mtermvectors/MultiTermVectorsRequest.ts#L31-L109 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/mtermvectors/MultiTermVectorsRequest.ts#L31-L134 type Request struct { - // Docs Array of existing or artificial documents. 
+ // Docs An array of existing or artificial documents. Docs []types.MTermVectorsOperation `json:"docs,omitempty"` - // Ids Simplified syntax to specify documents by their ID if they're in the same + // Ids A simplified syntax to specify documents by their ID if they're in the same // index. Ids []string `json:"ids,omitempty"` } diff --git a/typedapi/core/mtermvectors/response.go b/typedapi/core/mtermvectors/response.go index 0039a383d3..b264c2d319 100644 --- a/typedapi/core/mtermvectors/response.go +++ b/typedapi/core/mtermvectors/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package mtermvectors @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mtermvectors // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/mtermvectors/MultiTermVectorsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/mtermvectors/MultiTermVectorsResponse.ts#L22-L24 type Response struct { Docs []types.TermVectorsResult `json:"docs"` } diff --git a/typedapi/core/openpointintime/open_point_in_time.go b/typedapi/core/openpointintime/open_point_in_time.go index 24c96f3943..f86e2722b4 100644 --- a/typedapi/core/openpointintime/open_point_in_time.go +++ b/typedapi/core/openpointintime/open_point_in_time.go @@ -16,10 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d

-// A search request by default executes against the most recent visible data of
-// the target indices,
+// Open a point in time.
+//
+// A search request by default runs against the most recent visible data of the
+// target indices,
// which is called point in time. Elasticsearch pit (point in time) is a
// lightweight view into the
// state of the data as it existed when initiated. In some cases, it’s preferred
@@ -29,9 +31,60 @@
// `search_after` requests, then the results of those requests might not be
// consistent as changes happening
// between searches are only visible to the more recent point in time.
+//
+// A point in time must be opened explicitly before being used in search
+// requests.
+//
+// A subsequent search request with the `pit` parameter must not specify
+// `index`, `routing`, or `preference` values as these parameters are copied
+// from the point in time.
+//
+// Just like regular searches, you can use `from` and `size` to page through
+// point in time search results, up to the first 10,000 hits.
+// If you want to retrieve more hits, use PIT with `search_after`.
+//
+// IMPORTANT: The open point in time request and each subsequent search request
+// can return different identifiers; always use the most recently received ID
+// for the next search request.
+//
+// When a PIT that contains shard failures is used in a search request, the
+// missing shards are always reported in the search response as a
+// `NoShardAvailableActionException` exception.
+// To get rid of these exceptions, a new PIT needs to be created so that shards
+// missing from the previous PIT can be handled, assuming they become available
+// in the meantime.
+// +// **Keeping point in time alive** +// +// The `keep_alive` parameter, which is passed to a open point in time request +// and search request, extends the time to live of the corresponding point in +// time. +// The value does not need to be long enough to process all data — it just needs +// to be long enough for the next request. +// +// Normally, the background merge process optimizes the index by merging +// together smaller segments to create new, bigger segments. +// Once the smaller segments are no longer needed they are deleted. +// However, open point-in-times prevent the old segments from being deleted +// since they are still in use. +// +// TIP: Keeping older segments alive means that more disk space and file handles +// are needed. +// Ensure that you have configured your nodes to have ample free file handles. +// +// Additionally, if a segment contains deleted or updated documents then the +// point in time must keep track of whether each document in the segment was +// live at the time of the initial search request. +// Ensure that your nodes have sufficient heap space if you have many open +// point-in-times on an index that is subject to ongoing deletes or updates. +// Note that a point-in-time doesn't prevent its associated indices from being +// deleted. +// You can check how many point-in-times (that is, search contexts) are open +// with the nodes stats API. package openpointintime import ( + gobytes "bytes" "context" "encoding/json" "errors" @@ -63,6 +116,10 @@ type OpenPointInTime struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int index string @@ -87,8 +144,10 @@ func NewOpenPointInTimeFunc(tp elastictransport.Interface) NewOpenPointInTime { } } -// A search request by default executes against the most recent visible data of -// the target indices, +// Open a point in time. 
+//
+// A search request by default runs against the most recent visible data of the
+// target indices,
// which is called point in time. Elasticsearch pit (point in time) is a
// lightweight view into the
// state of the data as it existed when initiated. In some cases, it’s preferred
@@ -99,12 +158,64 @@ func NewOpenPointInTimeFunc(tp elastictransport.Interface) NewOpenPointInTime {
// consistent as changes happening
// between searches are only visible to the more recent point in time.
//
+// A point in time must be opened explicitly before being used in search
+// requests.
+//
+// A subsequent search request with the `pit` parameter must not specify
+// `index`, `routing`, or `preference` values as these parameters are copied
+// from the point in time.
+//
+// Just like regular searches, you can use `from` and `size` to page through
+// point in time search results, up to the first 10,000 hits.
+// If you want to retrieve more hits, use PIT with `search_after`.
+//
+// IMPORTANT: The open point in time request and each subsequent search request
+// can return different identifiers; always use the most recently received ID
+// for the next search request.
+//
+// When a PIT that contains shard failures is used in a search request, the
+// missing shards are always reported in the search response as a
+// `NoShardAvailableActionException` exception.
+// To get rid of these exceptions, a new PIT needs to be created so that shards
+// missing from the previous PIT can be handled, assuming they become available
+// in the meantime.
+//
+// **Keeping point in time alive**
+//
+// The `keep_alive` parameter, which is passed to an open point in time request
+// and search request, extends the time to live of the corresponding point in
+// time.
+// The value does not need to be long enough to process all data — it just needs
+// to be long enough for the next request.
+// +// Normally, the background merge process optimizes the index by merging +// together smaller segments to create new, bigger segments. +// Once the smaller segments are no longer needed they are deleted. +// However, open point-in-times prevent the old segments from being deleted +// since they are still in use. +// +// TIP: Keeping older segments alive means that more disk space and file handles +// are needed. +// Ensure that you have configured your nodes to have ample free file handles. +// +// Additionally, if a segment contains deleted or updated documents then the +// point in time must keep track of whether each document in the segment was +// live at the time of the initial search request. +// Ensure that your nodes have sufficient heap space if you have many open +// point-in-times on an index that is subject to ongoing deletes or updates. +// Note that a point-in-time doesn't prevent its associated indices from being +// deleted. +// You can check how many point-in-times (that is, search contexts) are open +// with the nodes stats API. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html func New(tp elastictransport.Interface) *OpenPointInTime { r := &OpenPointInTime{ transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -116,6 +227,21 @@ func New(tp elastictransport.Interface) *OpenPointInTime { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *OpenPointInTime) Raw(raw io.Reader) *OpenPointInTime { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
+func (r *OpenPointInTime) Request(req *Request) *OpenPointInTime { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. func (r *OpenPointInTime) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -125,6 +251,31 @@ func (r *OpenPointInTime) HttpRequest(ctx context.Context) (*http.Request, error var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for OpenPointInTime: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -269,45 +420,6 @@ func (r OpenPointInTime) Do(providedCtx context.Context) (*Response, error) { return nil, errorResponse } -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r OpenPointInTime) IsSuccess(providedCtx context.Context) (bool, error) { - var ctx context.Context - r.spanStarted = true - if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - ctx = instrument.Start(providedCtx, "open_point_in_time") - defer instrument.Close(ctx) - } - if ctx == nil { - ctx = providedCtx - } - - res, err := r.Perform(ctx) - - if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err - } - - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil - } - - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the OpenPointInTime query execution, status code: %d", res.StatusCode) - if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.RecordError(ctx, err) - } - return false, err - } - - return false, nil -} - // Header set a key, value pair in the OpenPointInTime headers map. func (r *OpenPointInTime) Header(key, value string) *OpenPointInTime { r.headers.Set(key, value) @@ -325,7 +437,7 @@ func (r *OpenPointInTime) _index(index string) *OpenPointInTime { return r } -// KeepAlive Extends the time to live of the corresponding point in time. +// KeepAlive Extend the length of time that the point in time persists. // API name: keep_alive func (r *OpenPointInTime) KeepAlive(duration string) *OpenPointInTime { r.values.Set("keep_alive", duration) @@ -342,8 +454,8 @@ func (r *OpenPointInTime) IgnoreUnavailable(ignoreunavailable bool) *OpenPointIn return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// By default, it is random. 
// API name: preference func (r *OpenPointInTime) Preference(preference string) *OpenPointInTime { r.values.Set("preference", preference) @@ -351,7 +463,7 @@ func (r *OpenPointInTime) Preference(preference string) *OpenPointInTime { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value that is used to route operations to a specific shard. // API name: routing func (r *OpenPointInTime) Routing(routing string) *OpenPointInTime { r.values.Set("routing", routing) @@ -359,10 +471,10 @@ func (r *OpenPointInTime) Routing(routing string) *OpenPointInTime { return r } -// ExpandWildcards Type of index that wildcard patterns can match. +// ExpandWildcards The type of index that wildcard patterns can match. // If the request can target data streams, this argument determines whether // wildcard expressions match hidden data streams. -// Supports comma-separated values, such as `open,hidden`. Valid values are: +// It supports comma-separated values, such as `open,hidden`. Valid values are: // `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards func (r *OpenPointInTime) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *OpenPointInTime { @@ -375,6 +487,19 @@ func (r *OpenPointInTime) ExpandWildcards(expandwildcards ...expandwildcard.Expa return r } +// AllowPartialSearchResults Indicates whether the point in time tolerates unavailable shards or shard +// failures when initially creating the PIT. +// If `false`, creating a point in time request when a shard is missing or +// unavailable will throw an exception. +// If `true`, the point in time will contain all the shards that are available +// at the time of the request. 
+// API name: allow_partial_search_results +func (r *OpenPointInTime) AllowPartialSearchResults(allowpartialsearchresults bool) *OpenPointInTime { + r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults)) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -418,3 +543,16 @@ func (r *OpenPointInTime) Pretty(pretty bool) *OpenPointInTime { return r } + +// Filter indices if the provided query rewrites to `match_none` on every shard. +// API name: index_filter +func (r *OpenPointInTime) IndexFilter(indexfilter types.QueryVariant) *OpenPointInTime { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexFilter = indexfilter.QueryCaster() + + return r +} diff --git a/typedapi/core/openpointintime/request.go b/typedapi/core/openpointintime/request.go new file mode 100644 index 0000000000..a8cdfe6176 --- /dev/null +++ b/typedapi/core/openpointintime/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package openpointintime + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package openpointintime +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/open_point_in_time/OpenPointInTimeRequest.ts#L25-L121 +type Request struct { + + // IndexFilter Filter indices if the provided query rewrites to `match_none` on every shard. + IndexFilter *types.Query `json:"index_filter,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Openpointintime request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/core/openpointintime/response.go b/typedapi/core/openpointintime/response.go index 29db756db8..729ceb44f7 100644 --- a/typedapi/core/openpointintime/response.go +++ b/typedapi/core/openpointintime/response.go @@ -16,15 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package openpointintime +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + // Response holds the response body struct for the package openpointintime // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/open_point_in_time/OpenPointInTimeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/open_point_in_time/OpenPointInTimeResponse.ts#L23-L29 type Response struct { Id string `json:"id"` + // Shards_ Shards used to create the PIT + Shards_ types.ShardStatistics `json:"_shards"` } // NewResponse returns a Response diff --git a/typedapi/core/ping/ping.go b/typedapi/core/ping/ping.go index 7fd847072b..0997902c7a 100644 --- a/typedapi/core/ping/ping.go +++ b/typedapi/core/ping/ping.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Ping the cluster. -// Returns whether the cluster is running. +// Get information about whether the cluster is running. package ping import ( @@ -68,9 +68,9 @@ func NewPingFunc(tp elastictransport.Interface) NewPing { } // Ping the cluster. -// Returns whether the cluster is running. +// Get information about whether the cluster is running. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html func New(tp elastictransport.Interface) *Ping { r := &Ping{ transport: tp, diff --git a/typedapi/core/putscript/put_script.go b/typedapi/core/putscript/put_script.go index 0752d9eb3d..02de529475 100644 --- a/typedapi/core/putscript/put_script.go +++ b/typedapi/core/putscript/put_script.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Create or update a script or search template. // Creates or updates a stored script or search template. @@ -88,7 +88,7 @@ func NewPutScriptFunc(tp elastictransport.Interface) NewPutScript { // Create or update a script or search template. // Creates or updates a stored script or search template. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/create-stored-script-api.html func New(tp elastictransport.Interface) *PutScript { r := &PutScript{ transport: tp, @@ -96,8 +96,6 @@ func New(tp elastictransport.Interface) *PutScript { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -326,8 +324,8 @@ func (r *PutScript) Header(key, value string) *PutScript { return r } -// Id Identifier for the stored script or search template. -// Must be unique within the cluster. +// Id The identifier for the stored script or search template. +// It must be unique within the cluster. 
// API Name: id func (r *PutScript) _id(id string) *PutScript { r.paramSet |= idMask @@ -336,7 +334,7 @@ func (r *PutScript) _id(id string) *PutScript { return r } -// Context Context in which the script or search template should run. +// Context The context in which the script or search template should run. // To prevent errors, the API immediately compiles the script or template in // this context. // API Name: context @@ -347,9 +345,10 @@ func (r *PutScript) Context(context string) *PutScript { return r } -// MasterTimeout Period to wait for a connection to the master node. +// MasterTimeout The period to wait for a connection to the master node. // If no response is received before the timeout expires, the request fails and // returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. // API name: master_timeout func (r *PutScript) MasterTimeout(duration string) *PutScript { r.values.Set("master_timeout", duration) @@ -357,9 +356,10 @@ func (r *PutScript) MasterTimeout(duration string) *PutScript { return r } -// Timeout Period to wait for a response. +// Timeout The period to wait for a response. // If no response is received before the timeout expires, the request fails and // returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. // API name: timeout func (r *PutScript) Timeout(duration string) *PutScript { r.values.Set("timeout", duration) @@ -411,11 +411,15 @@ func (r *PutScript) Pretty(pretty bool) *PutScript { return r } -// Script Contains the script or search template, its parameters, and its language. +// The script or search template, its parameters, and its language. 
// API name: script -func (r *PutScript) Script(script *types.StoredScript) *PutScript { +func (r *PutScript) Script(script types.StoredScriptVariant) *PutScript { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Script = *script + r.req.Script = *script.StoredScriptCaster() return r } diff --git a/typedapi/core/putscript/request.go b/typedapi/core/putscript/request.go index ef7321e686..6a75998709 100644 --- a/typedapi/core/putscript/request.go +++ b/typedapi/core/putscript/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putscript @@ -29,10 +29,10 @@ import ( // Request holds the request body struct for the package putscript // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/put_script/PutScriptRequest.ts#L25-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/put_script/PutScriptRequest.ts#L25-L87 type Request struct { - // Script Contains the script or search template, its parameters, and its language. + // Script The script or search template, its parameters, and its language. Script types.StoredScript `json:"script"` } diff --git a/typedapi/core/putscript/response.go b/typedapi/core/putscript/response.go index 0f610a02c7..62a6090faa 100644 --- a/typedapi/core/putscript/response.go +++ b/typedapi/core/putscript/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putscript // Response holds the response body struct for the package putscript // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/put_script/PutScriptResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/put_script/PutScriptResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/core/rankeval/rank_eval.go b/typedapi/core/rankeval/rank_eval.go index 6e6cc25c16..49ca4fabac 100644 --- a/typedapi/core/rankeval/rank_eval.go +++ b/typedapi/core/rankeval/rank_eval.go @@ -16,10 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Enables you to evaluate the quality of ranked search results over a set of -// typical search queries. +// Evaluate ranked search results. +// +// Evaluate the quality of ranked search results over a set of typical search +// queries. package rankeval import ( @@ -81,8 +83,10 @@ func NewRankEvalFunc(tp elastictransport.Interface) NewRankEval { } } -// Enables you to evaluate the quality of ranked search results over a set of -// typical search queries. +// Evaluate ranked search results. +// +// Evaluate the quality of ranked search results over a set of typical search +// queries. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html func New(tp elastictransport.Interface) *RankEval { @@ -92,8 +96,6 @@ func New(tp elastictransport.Interface) *RankEval { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -310,8 +312,9 @@ func (r *RankEval) Header(key, value string) *RankEval { return r } -// Index Comma-separated list of data streams, indices, and index aliases used to -// limit the request. Wildcard (`*`) expressions are supported. +// Index A comma-separated list of data streams, indices, and index aliases used to +// limit the request. +// Wildcard (`*`) expressions are supported. // To target all data streams and indices in a cluster, omit this parameter or // use `_all` or `*`. // API Name: index @@ -407,19 +410,30 @@ func (r *RankEval) Pretty(pretty bool) *RankEval { return r } -// Metric Definition of the evaluation metric to calculate. +// Definition of the evaluation metric to calculate. // API name: metric -func (r *RankEval) Metric(metric *types.RankEvalMetric) *RankEval { +func (r *RankEval) Metric(metric types.RankEvalMetricVariant) *RankEval { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Metric = metric + r.req.Metric = metric.RankEvalMetricCaster() return r } -// Requests A set of typical search requests, together with their provided ratings. +// A set of typical search requests, together with their provided ratings. 
// API name: requests -func (r *RankEval) Requests(requests ...types.RankEvalRequestItem) *RankEval { - r.req.Requests = requests +func (r *RankEval) Requests(requests ...types.RankEvalRequestItemVariant) *RankEval { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range requests { + + r.req.Requests = append(r.req.Requests, *v.RankEvalRequestItemCaster()) + } return r } diff --git a/typedapi/core/rankeval/request.go b/typedapi/core/rankeval/request.go index 5f6c84f0d6..6bc4eb59b5 100644 --- a/typedapi/core/rankeval/request.go +++ b/typedapi/core/rankeval/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package rankeval @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package rankeval // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/RankEvalRequest.ts#L24-L61 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/RankEvalRequest.ts#L24-L76 type Request struct { // Metric Definition of the evaluation metric to calculate. diff --git a/typedapi/core/rankeval/response.go b/typedapi/core/rankeval/response.go index c355c1ad91..b578f992bb 100644 --- a/typedapi/core/rankeval/response.go +++ b/typedapi/core/rankeval/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package rankeval @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package rankeval // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/RankEvalResponse.ts#L26-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/RankEvalResponse.ts#L26-L34 type Response struct { // Details The details section contains one entry for every query in the original diff --git a/typedapi/core/reindex/reindex.go b/typedapi/core/reindex/reindex.go index e620f2cc9c..cbc9b89ed0 100644 --- a/typedapi/core/reindex/reindex.go +++ b/typedapi/core/reindex/reindex.go @@ -16,12 +16,289 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Reindex documents. -// Copies documents from a source to a destination. The source can be any -// existing index, alias, or data stream. The destination must differ from the -// source. For example, you cannot reindex a data stream into itself. +// +// Copy documents from a source to a destination. +// You can copy all documents to the destination index or reindex a subset of +// the documents. +// The source can be any existing index, alias, or data stream. +// The destination must differ from the source. +// For example, you cannot reindex a data stream into itself. +// +// IMPORTANT: Reindex requires `_source` to be enabled for all documents in the +// source. 
+// The destination should be configured as wanted before calling the reindex +// API. +// Reindex does not copy the settings from the source or its associated +// template. +// Mappings, shard counts, and replicas, for example, must be configured ahead +// of time. +// +// If the Elasticsearch security features are enabled, you must have the +// following security privileges: +// +// * The `read` index privilege for the source data stream, index, or alias. +// * The `write` index privilege for the destination data stream, index, or +// index alias. +// * To automatically create a data stream or index with a reindex API request, +// you must have the `auto_configure`, `create_index`, or `manage` index +// privilege for the destination data stream, index, or alias. +// * If reindexing from a remote cluster, the `source.remote.user` must have the +// `monitor` cluster privilege and the `read` index privilege for the source +// data stream, index, or alias. +// +// If reindexing from a remote cluster, you must explicitly allow the remote +// host in the `reindex.remote.whitelist` setting. +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// The `dest` element can be configured like the index API to control optimistic +// concurrency control. +// Omitting `version_type` or setting it to `internal` causes Elasticsearch to +// blindly dump documents into the destination, overwriting any that happen to +// have the same ID. +// +// Setting `version_type` to `external` causes Elasticsearch to preserve the +// `version` from the source, create any documents that are missing, and update +// any documents that have an older version in the destination than they do in +// the source. +// +// Setting `op_type` to `create` causes the reindex API to create only missing +// documents in the destination. +// All existing documents will cause a version conflict. 
+// +// IMPORTANT: Because data streams are append-only, any reindex request to a +// destination data stream must have an `op_type` of `create`. +// A reindex can only add new documents to a destination data stream. +// It cannot update existing documents in a destination data stream. +// +// By default, version conflicts abort the reindex process. +// To continue reindexing if there are conflicts, set the `conflicts` request +// body property to `proceed`. +// In this case, the response includes a count of the version conflicts that +// were encountered. +// Note that the handling of other error types is unaffected by the `conflicts` +// property. +// Additionally, if you opt to count version conflicts, the operation could +// attempt to reindex more documents from the source than `max_docs` until it +// has successfully indexed `max_docs` documents into the target or it has gone +// through every document in the source query. +// +// NOTE: The reindex API makes no effort to handle ID collisions. +// The last document written will "win" but the order isn't usually predictable +// so it is not a good idea to rely on this behavior. +// Instead, make sure that IDs are unique by using a script. +// +// **Running reindex asynchronously** +// +// If the request contains `wait_for_completion=false`, Elasticsearch performs +// some preflight checks, launches the request, and returns a task you can use +// to cancel or get the status of the task. +// Elasticsearch creates a record of this task as a document at +// `_tasks/`. +// +// **Reindex from multiple sources** +// +// If you have many sources to reindex it is generally better to reindex them +// one at a time rather than using a glob pattern to pick up multiple sources. +// That way you can resume the process if there are any errors by removing the +// partially completed source and starting over. 
+// It also makes parallelizing the process fairly simple: split the list of +// sources to reindex and run each list in parallel. +// +// For example, you can use a bash script like this: +// +// ``` +// for index in i1 i2 i3 i4 i5; do +// +// curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty +// +// -d'{ +// "source": { +// "index": "'$index'" +// }, +// "dest": { +// "index": "'$index'-reindexed" +// } +// }' +// +// done +// ``` +// +// **Throttling** +// +// Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, +// for example) to throttle the rate at which reindex issues batches of index +// operations. +// Requests are throttled by padding each batch with a wait time. +// To turn off throttling, set `requests_per_second` to `-1`. +// +// The throttling is done by waiting between batches so that the scroll that +// reindex uses internally can be given a timeout that takes into account the +// padding. +// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. +// By default the batch size is `1000`, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single bulk request, large batch sizes cause +// Elasticsearch to create many requests and then wait for a while before +// starting the next set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Reindex supports sliced scroll to parallelize the reindexing process. +// This parallelization can improve efficiency and provide a convenient way to +// break the request down into smaller parts. +// +// NOTE: Reindexing from remote clusters does not support manual or automatic +// slicing. 
+// +// You can slice a reindex request manually by providing a slice ID and total +// number of slices to each request. +// You can also let reindex automatically parallelize by using sliced scroll to +// slice on `_id`. +// The `slices` parameter specifies the number of slices to use. +// +// Adding `slices` to the reindex request just automates the manual process, +// creating sub-requests which means it has some quirks: +// +// * You can see these requests in the tasks API. These sub-requests are "child" +// tasks of the task for the request with slices. +// * Fetching the status of the task for the request with `slices` only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with `slices` will cancel each sub-request. +// * Due to the nature of `slices`, each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// `slices` are distributed proportionally to each sub-request. Combine that +// with the previous point about distribution being uneven and you should +// conclude that using `max_docs` with `slices` might not result in exactly +// `max_docs` documents being reindexed. +// * Each sub-request gets a slightly different snapshot of the source, though +// these are all taken at approximately the same time. +// +// If slicing automatically, setting `slices` to `auto` will choose a reasonable +// number for most indices. +// If slicing manually or otherwise tuning automatic slicing, use the following +// guidelines. 
+// +// Query performance is most efficient when the number of slices is equal to the +// number of shards in the index. +// If that number is large (for example, `500`), choose a lower number as too +// many slices will hurt performance. +// Setting slices higher than the number of shards generally does not improve +// efficiency and adds overhead. +// +// Indexing performance scales linearly across available resources with the +// number of slices. +// +// Whether query or indexing performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Modify documents during reindexing** +// +// Like `_update_by_query`, reindex operations support a script that modifies +// the document. +// Unlike `_update_by_query`, the script is allowed to modify the document's +// metadata. +// +// Just as in `_update_by_query`, you can set `ctx.op` to change the operation +// that is run on the destination. +// For example, set `ctx.op` to `noop` if your script decides that the document +// doesn’t have to be indexed in the destination. This "no operation" will be +// reported in the `noop` counter in the response body. +// Set `ctx.op` to `delete` if your script decides that the document must be +// deleted from the destination. +// The deletion will be reported in the `deleted` counter in the response body. +// Setting `ctx.op` to anything else will return an error, as will setting any +// other field in `ctx`. +// +// Think of the possibilities! Just be careful; you are able to change: +// +// * `_id` +// * `_index` +// * `_version` +// * `_routing` +// +// Setting `_version` to `null` or clearing it from the `ctx` map is just like +// not sending the version in an indexing request. +// It will cause the document to be overwritten in the destination regardless of +// the version on the target or the version type you use in the reindex API. 
+// +// **Reindex from remote** +// +// Reindex supports reindexing from a remote Elasticsearch cluster. +// The `host` parameter must contain a scheme, host, port, and optional path. +// The `username` and `password` parameters are optional and when they are +// present the reindex operation will connect to the remote Elasticsearch node +// using basic authentication. +// Be sure to use HTTPS when using basic authentication or the password will be +// sent in plain text. +// There are a range of settings available to configure the behavior of the +// HTTPS connection. +// +// When using Elastic Cloud, it is also possible to authenticate against the +// remote cluster through the use of a valid API key. +// Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` +// setting. +// It can be set to a comma delimited list of allowed remote host and port +// combinations. +// Scheme is ignored; only the host and port are used. +// For example: +// +// ``` +// reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, +// localhost:*"] +// ``` +// +// The list of allowed hosts must be configured on any nodes that will +// coordinate the reindex. +// This feature should work with remote clusters of any version of +// Elasticsearch. +// This should enable you to upgrade from any version of Elasticsearch to the +// current version by reindexing from a cluster of the old version. +// +// WARNING: Elasticsearch does not support forward compatibility across major +// versions. +// For example, you cannot reindex from a 7.x cluster into a 6.x cluster. +// +// To enable queries sent to older versions of Elasticsearch, the `query` +// parameter is sent directly to the remote host without validation or +// modification. +// +// NOTE: Reindexing from remote clusters does not support manual or automatic +// slicing. +// +// Reindexing from a remote server uses an on-heap buffer that defaults to a +// maximum size of 100mb. 
+// If the remote index includes very large documents you'll need to use a +// smaller batch size. +// It is also possible to set the socket read timeout on the remote connection +// with the `socket_timeout` field and the connection timeout with the +// `connect_timeout` field. +// Both default to 30 seconds. +// +// **Configuring SSL parameters** +// +// Reindex from remote supports configurable SSL settings. +// These must be specified in the `elasticsearch.yml` file, with the exception +// of the secure settings, which you add in the Elasticsearch keystore. +// It is not possible to configure SSL in the body of the reindex request. package reindex import ( @@ -78,9 +355,286 @@ func NewReindexFunc(tp elastictransport.Interface) NewReindex { } // Reindex documents. -// Copies documents from a source to a destination. The source can be any -// existing index, alias, or data stream. The destination must differ from the -// source. For example, you cannot reindex a data stream into itself. +// +// Copy documents from a source to a destination. +// You can copy all documents to the destination index or reindex a subset of +// the documents. +// The source can be any existing index, alias, or data stream. +// The destination must differ from the source. +// For example, you cannot reindex a data stream into itself. +// +// IMPORTANT: Reindex requires `_source` to be enabled for all documents in the +// source. +// The destination should be configured as wanted before calling the reindex +// API. +// Reindex does not copy the settings from the source or its associated +// template. +// Mappings, shard counts, and replicas, for example, must be configured ahead +// of time. +// +// If the Elasticsearch security features are enabled, you must have the +// following security privileges: +// +// * The `read` index privilege for the source data stream, index, or alias. +// * The `write` index privilege for the destination data stream, index, or +// index alias. 
+// * To automatically create a data stream or index with a reindex API request, +// you must have the `auto_configure`, `create_index`, or `manage` index +// privilege for the destination data stream, index, or alias. +// * If reindexing from a remote cluster, the `source.remote.user` must have the +// `monitor` cluster privilege and the `read` index privilege for the source +// data stream, index, or alias. +// +// If reindexing from a remote cluster, you must explicitly allow the remote +// host in the `reindex.remote.whitelist` setting. +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// The `dest` element can be configured like the index API to control optimistic +// concurrency control. +// Omitting `version_type` or setting it to `internal` causes Elasticsearch to +// blindly dump documents into the destination, overwriting any that happen to +// have the same ID. +// +// Setting `version_type` to `external` causes Elasticsearch to preserve the +// `version` from the source, create any documents that are missing, and update +// any documents that have an older version in the destination than they do in +// the source. +// +// Setting `op_type` to `create` causes the reindex API to create only missing +// documents in the destination. +// All existing documents will cause a version conflict. +// +// IMPORTANT: Because data streams are append-only, any reindex request to a +// destination data stream must have an `op_type` of `create`. +// A reindex can only add new documents to a destination data stream. +// It cannot update existing documents in a destination data stream. +// +// By default, version conflicts abort the reindex process. +// To continue reindexing if there are conflicts, set the `conflicts` request +// body property to `proceed`. +// In this case, the response includes a count of the version conflicts that +// were encountered. 
+// Note that the handling of other error types is unaffected by the `conflicts` +// property. +// Additionally, if you opt to count version conflicts, the operation could +// attempt to reindex more documents from the source than `max_docs` until it +// has successfully indexed `max_docs` documents into the target or it has gone +// through every document in the source query. +// +// NOTE: The reindex API makes no effort to handle ID collisions. +// The last document written will "win" but the order isn't usually predictable +// so it is not a good idea to rely on this behavior. +// Instead, make sure that IDs are unique by using a script. +// +// **Running reindex asynchronously** +// +// If the request contains `wait_for_completion=false`, Elasticsearch performs +// some preflight checks, launches the request, and returns a task you can use +// to cancel or get the status of the task. +// Elasticsearch creates a record of this task as a document at +// `_tasks/`. +// +// **Reindex from multiple sources** +// +// If you have many sources to reindex it is generally better to reindex them +// one at a time rather than using a glob pattern to pick up multiple sources. +// That way you can resume the process if there are any errors by removing the +// partially completed source and starting over. +// It also makes parallelizing the process fairly simple: split the list of +// sources to reindex and run each list in parallel. +// +// For example, you can use a bash script like this: +// +// ``` +// for index in i1 i2 i3 i4 i5; do +// +// curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty +// +// -d'{ +// "source": { +// "index": "'$index'" +// }, +// "dest": { +// "index": "'$index'-reindexed" +// } +// }' +// +// done +// ``` +// +// **Throttling** +// +// Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, +// for example) to throttle the rate at which reindex issues batches of index +// operations. 
+// Requests are throttled by padding each batch with a wait time. +// To turn off throttling, set `requests_per_second` to `-1`. +// +// The throttling is done by waiting between batches so that the scroll that +// reindex uses internally can be given a timeout that takes into account the +// padding. +// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. +// By default the batch size is `1000`, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single bulk request, large batch sizes cause +// Elasticsearch to create many requests and then wait for a while before +// starting the next set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Reindex supports sliced scroll to parallelize the reindexing process. +// This parallelization can improve efficiency and provide a convenient way to +// break the request down into smaller parts. +// +// NOTE: Reindexing from remote clusters does not support manual or automatic +// slicing. +// +// You can slice a reindex request manually by providing a slice ID and total +// number of slices to each request. +// You can also let reindex automatically parallelize by using sliced scroll to +// slice on `_id`. +// The `slices` parameter specifies the number of slices to use. +// +// Adding `slices` to the reindex request just automates the manual process, +// creating sub-requests which means it has some quirks: +// +// * You can see these requests in the tasks API. These sub-requests are "child" +// tasks of the task for the request with slices. +// * Fetching the status of the task for the request with `slices` only contains +// the status of completed slices. 
+// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with `slices` will cancel each sub-request. +// * Due to the nature of `slices`, each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// `slices` are distributed proportionally to each sub-request. Combine that +// with the previous point about distribution being uneven and you should +// conclude that using `max_docs` with `slices` might not result in exactly +// `max_docs` documents being reindexed. +// * Each sub-request gets a slightly different snapshot of the source, though +// these are all taken at approximately the same time. +// +// If slicing automatically, setting `slices` to `auto` will choose a reasonable +// number for most indices. +// If slicing manually or otherwise tuning automatic slicing, use the following +// guidelines. +// +// Query performance is most efficient when the number of slices is equal to the +// number of shards in the index. +// If that number is large (for example, `500`), choose a lower number as too +// many slices will hurt performance. +// Setting slices higher than the number of shards generally does not improve +// efficiency and adds overhead. +// +// Indexing performance scales linearly across available resources with the +// number of slices. +// +// Whether query or indexing performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Modify documents during reindexing** +// +// Like `_update_by_query`, reindex operations support a script that modifies +// the document. 
+// Unlike `_update_by_query`, the script is allowed to modify the document's +// metadata. +// +// Just as in `_update_by_query`, you can set `ctx.op` to change the operation +// that is run on the destination. +// For example, set `ctx.op` to `noop` if your script decides that the document +// doesn’t have to be indexed in the destination. This "no operation" will be +// reported in the `noop` counter in the response body. +// Set `ctx.op` to `delete` if your script decides that the document must be +// deleted from the destination. +// The deletion will be reported in the `deleted` counter in the response body. +// Setting `ctx.op` to anything else will return an error, as will setting any +// other field in `ctx`. +// +// Think of the possibilities! Just be careful; you are able to change: +// +// * `_id` +// * `_index` +// * `_version` +// * `_routing` +// +// Setting `_version` to `null` or clearing it from the `ctx` map is just like +// not sending the version in an indexing request. +// It will cause the document to be overwritten in the destination regardless of +// the version on the target or the version type you use in the reindex API. +// +// **Reindex from remote** +// +// Reindex supports reindexing from a remote Elasticsearch cluster. +// The `host` parameter must contain a scheme, host, port, and optional path. +// The `username` and `password` parameters are optional and when they are +// present the reindex operation will connect to the remote Elasticsearch node +// using basic authentication. +// Be sure to use HTTPS when using basic authentication or the password will be +// sent in plain text. +// There are a range of settings available to configure the behavior of the +// HTTPS connection. +// +// When using Elastic Cloud, it is also possible to authenticate against the +// remote cluster through the use of a valid API key. +// Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` +// setting. 
+// It can be set to a comma delimited list of allowed remote host and port +// combinations. +// Scheme is ignored; only the host and port are used. +// For example: +// +// ``` +// reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, +// localhost:*"] +// ``` +// +// The list of allowed hosts must be configured on any nodes that will +// coordinate the reindex. +// This feature should work with remote clusters of any version of +// Elasticsearch. +// This should enable you to upgrade from any version of Elasticsearch to the +// current version by reindexing from a cluster of the old version. +// +// WARNING: Elasticsearch does not support forward compatibility across major +// versions. +// For example, you cannot reindex from a 7.x cluster into a 6.x cluster. +// +// To enable queries sent to older versions of Elasticsearch, the `query` +// parameter is sent directly to the remote host without validation or +// modification. +// +// NOTE: Reindexing from remote clusters does not support manual or automatic +// slicing. +// +// Reindexing from a remote server uses an on-heap buffer that defaults to a +// maximum size of 100mb. +// If the remote index includes very large documents you'll need to use a +// smaller batch size. +// It is also possible to set the socket read timeout on the remote connection +// with the `socket_timeout` field and the connection timeout with the +// `connect_timeout` field. +// Both default to 30 seconds. +// +// **Configuring SSL parameters** +// +// Reindex from remote supports configurable SSL settings. +// These must be specified in the `elasticsearch.yml` file, with the exception +// of the secure settings, which you add in the Elasticsearch keystore. +// It is not possible to configure SSL in the body of the reindex request. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html func New(tp elastictransport.Interface) *Reindex { @@ -90,8 +644,6 @@ func New(tp elastictransport.Interface) *Reindex { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -307,7 +859,7 @@ func (r *Reindex) Refresh(refresh bool) *Reindex { } // RequestsPerSecond The throttle for this request in sub-requests per second. -// Defaults to no throttle. +// By default, there is no throttle. // API name: requests_per_second func (r *Reindex) RequestsPerSecond(requestspersecond string) *Reindex { r.values.Set("requests_per_second", requestspersecond) @@ -315,8 +867,8 @@ func (r *Reindex) RequestsPerSecond(requestspersecond string) *Reindex { return r } -// Scroll Specifies how long a consistent view of the index should be maintained for -// scrolled search. +// Scroll The period of time that a consistent view of the index should be maintained +// for scrolled search. // API name: scroll func (r *Reindex) Scroll(duration string) *Reindex { r.values.Set("scroll", duration) @@ -325,7 +877,19 @@ func (r *Reindex) Scroll(duration string) *Reindex { } // Slices The number of slices this task should be divided into. -// Defaults to 1 slice, meaning the task isn’t sliced into subtasks. +// It defaults to one slice, which means the task isn't sliced into subtasks. +// +// Reindex supports sliced scroll to parallelize the reindexing process. +// This parallelization can improve efficiency and provide a convenient way to +// break the request down into smaller parts. +// +// NOTE: Reindexing from remote clusters does not support manual or automatic +// slicing. +// +// If set to `auto`, Elasticsearch chooses the number of slices to use. +// This setting will use one slice per shard, up to a certain limit. 
+// If there are multiple sources, it will choose the number of slices based on +// the index or backing index with the smallest number of shards. // API name: slices func (r *Reindex) Slices(slices string) *Reindex { r.values.Set("slices", slices) @@ -333,8 +897,10 @@ func (r *Reindex) Slices(slices string) *Reindex { return r } -// Timeout Period each indexing waits for automatic index creation, dynamic mapping +// Timeout The period each indexing waits for automatic index creation, dynamic mapping // updates, and waiting for active shards. +// By default, Elasticsearch waits for at least one minute before failing. +// The actual wait time could be longer, particularly when multiple waits occur. // API name: timeout func (r *Reindex) Timeout(duration string) *Reindex { r.values.Set("timeout", duration) @@ -344,8 +910,10 @@ func (r *Reindex) Timeout(duration string) *Reindex { // WaitForActiveShards The number of shard copies that must be active before proceeding with the // operation. -// Set to `all` or any positive integer up to the total number of shards in the -// index (`number_of_replicas+1`). +// Set it to `all` or any positive integer up to the total number of shards in +// the index (`number_of_replicas+1`). +// The default value is one, which means it waits for each primary shard to be +// active. // API name: wait_for_active_shards func (r *Reindex) WaitForActiveShards(waitforactiveshards string) *Reindex { r.values.Set("wait_for_active_shards", waitforactiveshards) @@ -413,54 +981,85 @@ func (r *Reindex) Pretty(pretty bool) *Reindex { return r } -// Conflicts Set to proceed to continue reindexing even if there are conflicts. +// Indicates whether to continue reindexing even when there are conflicts. 
// API name: conflicts func (r *Reindex) Conflicts(conflicts conflicts.Conflicts) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Conflicts = &conflicts - return r } -// Dest The destination you are copying to. +// The destination you are copying to. // API name: dest -func (r *Reindex) Dest(dest *types.ReindexDestination) *Reindex { +func (r *Reindex) Dest(dest types.ReindexDestinationVariant) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Dest = *dest + r.req.Dest = *dest.ReindexDestinationCaster() return r } -// MaxDocs The maximum number of documents to reindex. +// The maximum number of documents to reindex. +// By default, all documents are reindexed. +// If it is a value less then or equal to `scroll_size`, a scroll will not be +// used to retrieve the results for the operation. +// +// If `conflicts` is set to `proceed`, the reindex operation could attempt to +// reindex more documents from the source than `max_docs` until it has +// successfully indexed `max_docs` documents into the target or it has gone +// through every document in the source query. // API name: max_docs func (r *Reindex) MaxDocs(maxdocs int64) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxDocs = &maxdocs return r } -// Script The script to run to update the document source or metadata when reindexing. +// The script to run to update the document source or metadata when reindexing. 
// API name: script -func (r *Reindex) Script(script *types.Script) *Reindex { +func (r *Reindex) Script(script types.ScriptVariant) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Script = script + r.req.Script = script.ScriptCaster() return r } // API name: size func (r *Reindex) Size(size int64) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Size = &size return r } -// Source The source you are copying from. +// The source you are copying from. // API name: source -func (r *Reindex) Source(source *types.ReindexSource) *Reindex { +func (r *Reindex) Source(source types.ReindexSourceVariant) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source = *source + r.req.Source = *source.ReindexSourceCaster() return r } diff --git a/typedapi/core/reindex/request.go b/typedapi/core/reindex/request.go index dd8f8e1d82..c7d4146dd3 100644 --- a/typedapi/core/reindex/request.go +++ b/typedapi/core/reindex/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package reindex @@ -30,14 +30,22 @@ import ( // Request holds the request body struct for the package reindex // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/reindex/ReindexRequest.ts#L27-L103 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/reindex/ReindexRequest.ts#L27-L309 type Request struct { - // Conflicts Set to proceed to continue reindexing even if there are conflicts. + // Conflicts Indicates whether to continue reindexing even when there are conflicts. Conflicts *conflicts.Conflicts `json:"conflicts,omitempty"` // Dest The destination you are copying to. Dest types.ReindexDestination `json:"dest"` // MaxDocs The maximum number of documents to reindex. + // By default, all documents are reindexed. + // If it is a value less then or equal to `scroll_size`, a scroll will not be + // used to retrieve the results for the operation. + // + // If `conflicts` is set to `proceed`, the reindex operation could attempt to + // reindex more documents from the source than `max_docs` until it has + // successfully indexed `max_docs` documents into the target or it has gone + // through every document in the source query. MaxDocs *int64 `json:"max_docs,omitempty"` // Script The script to run to update the document source or metadata when reindexing. Script *types.Script `json:"script,omitempty"` diff --git a/typedapi/core/reindex/response.go b/typedapi/core/reindex/response.go index 63817ea43d..fe13c77070 100644 --- a/typedapi/core/reindex/response.go +++ b/typedapi/core/reindex/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package reindex @@ -26,24 +26,53 @@ import ( // Response holds the response body struct for the package reindex // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/reindex/ReindexResponse.ts#L26-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/reindex/ReindexResponse.ts#L26-L92 type Response struct { - Batches *int64 `json:"batches,omitempty"` - Created *int64 `json:"created,omitempty"` - Deleted *int64 `json:"deleted,omitempty"` - Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` - Noops *int64 `json:"noops,omitempty"` - RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` - Retries *types.Retries `json:"retries,omitempty"` - SliceId *int `json:"slice_id,omitempty"` - Task types.TaskId `json:"task,omitempty"` - ThrottledMillis *int64 `json:"throttled_millis,omitempty"` - ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` - TimedOut *bool `json:"timed_out,omitempty"` - Took *int64 `json:"took,omitempty"` - Total *int64 `json:"total,omitempty"` - Updated *int64 `json:"updated,omitempty"` - VersionConflicts *int64 `json:"version_conflicts,omitempty"` + + // Batches The number of scroll responses that were pulled back by the reindex. + Batches *int64 `json:"batches,omitempty"` + // Created The number of documents that were successfully created. + Created *int64 `json:"created,omitempty"` + // Deleted The number of documents that were successfully deleted. + Deleted *int64 `json:"deleted,omitempty"` + // Failures If there were any unrecoverable errors during the process, it is an array of + // those failures. 
+ // If this array is not empty, the request ended because of those failures. + // Reindex is implemented using batches and any failure causes the entire + // process to end but all failures in the current batch are collected into the + // array. + // You can use the `conflicts` option to prevent the reindex from ending on + // version conflicts. + Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` + // Noops The number of documents that were ignored because the script used for the + // reindex returned a `noop` value for `ctx.op`. + Noops *int64 `json:"noops,omitempty"` + // RequestsPerSecond The number of requests per second effectively run during the reindex. + RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` + // Retries The number of retries attempted by reindex. + Retries *types.Retries `json:"retries,omitempty"` + SliceId *int `json:"slice_id,omitempty"` + Task types.TaskId `json:"task,omitempty"` + // ThrottledMillis The number of milliseconds the request slept to conform to + // `requests_per_second`. + ThrottledMillis *int64 `json:"throttled_millis,omitempty"` + // ThrottledUntilMillis This field should always be equal to zero in a reindex response. + // It has meaning only when using the task API, where it indicates the next time + // (in milliseconds since epoch) that a throttled request will be run again in + // order to conform to `requests_per_second`. + ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` + // TimedOut If any of the requests that ran during the reindex timed out, it is `true`. + TimedOut *bool `json:"timed_out,omitempty"` + // Took The total milliseconds the entire operation took. + Took *int64 `json:"took,omitempty"` + // Total The number of documents that were successfully processed. + Total *int64 `json:"total,omitempty"` + // Updated The number of documents that were successfully updated. 
+ // That is to say, a document with the same ID already existed before the + // reindex updated it. + Updated *int64 `json:"updated,omitempty"` + // VersionConflicts The number of version conflicts that occurred. + VersionConflicts *int64 `json:"version_conflicts,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/core/reindexrethrottle/reindex_rethrottle.go b/typedapi/core/reindexrethrottle/reindex_rethrottle.go index 3a851c5656..33a9422afb 100644 --- a/typedapi/core/reindexrethrottle/reindex_rethrottle.go +++ b/typedapi/core/reindexrethrottle/reindex_rethrottle.go @@ -16,9 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Copies documents from a source to a destination. +// Throttle a reindex operation. +// +// Change the number of requests per second for a particular reindex operation. +// For example: +// +// ``` +// POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 +// ``` +// +// Rethrottling that speeds up the query takes effect immediately. +// Rethrottling that slows down the query will take effect after completing the +// current batch. +// This behavior prevents scroll timeouts. package reindexrethrottle import ( @@ -76,7 +88,19 @@ func NewReindexRethrottleFunc(tp elastictransport.Interface) NewReindexRethrottl } } -// Copies documents from a source to a destination. +// Throttle a reindex operation. +// +// Change the number of requests per second for a particular reindex operation. +// For example: +// +// ``` +// POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 +// ``` +// +// Rethrottling that speeds up the query takes effect immediately. 
+// Rethrottling that slows down the query will take effect after completing the +// current batch. +// This behavior prevents scroll timeouts. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html func New(tp elastictransport.Interface) *ReindexRethrottle { @@ -290,7 +314,7 @@ func (r *ReindexRethrottle) Header(key, value string) *ReindexRethrottle { return r } -// TaskId Identifier for the task. +// TaskId The task identifier, which can be found by using the tasks API. // API Name: taskid func (r *ReindexRethrottle) _taskid(taskid string) *ReindexRethrottle { r.paramSet |= taskidMask @@ -300,6 +324,8 @@ func (r *ReindexRethrottle) _taskid(taskid string) *ReindexRethrottle { } // RequestsPerSecond The throttle for this request in sub-requests per second. +// It can be either `-1` to turn off throttling or any decimal number like `1.7` +// or `12` to throttle to that level. // API name: requests_per_second func (r *ReindexRethrottle) RequestsPerSecond(requestspersecond string) *ReindexRethrottle { r.values.Set("requests_per_second", requestspersecond) diff --git a/typedapi/core/reindexrethrottle/response.go b/typedapi/core/reindexrethrottle/response.go index a1fb7f1de6..ae3b3c4cbe 100644 --- a/typedapi/core/reindexrethrottle/response.go +++ b/typedapi/core/reindexrethrottle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package reindexrethrottle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package reindexrethrottle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/reindex_rethrottle/ReindexRethrottleResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/reindex_rethrottle/ReindexRethrottleResponse.ts#L23-L25 type Response struct { Nodes map[string]types.ReindexNode `json:"nodes"` } diff --git a/typedapi/core/rendersearchtemplate/render_search_template.go b/typedapi/core/rendersearchtemplate/render_search_template.go index e669b02f60..b9a76e8858 100644 --- a/typedapi/core/rendersearchtemplate/render_search_template.go +++ b/typedapi/core/rendersearchtemplate/render_search_template.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Renders a search template as a search request body. +// Render a search template. +// +// Render a search template as a search request body. package rendersearchtemplate import ( @@ -37,10 +39,6 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) -const ( - idMask = iota + 1 -) - // ErrBuildPath is returned in case of missing parameters within the build of the request. 
var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") @@ -79,7 +77,9 @@ func NewRenderSearchTemplateFunc(tp elastictransport.Interface) NewRenderSearchT } } -// Renders a search template as a search request body. +// Render a search template. +// +// Render a search template as a search request body. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-template-api.html func New(tp elastictransport.Interface) *RenderSearchTemplate { @@ -89,8 +89,6 @@ func New(tp elastictransport.Interface) *RenderSearchTemplate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -160,19 +158,6 @@ func (r *RenderSearchTemplate) HttpRequest(ctx context.Context) (*http.Request, path.WriteString("/") path.WriteString("template") - method = http.MethodPost - case r.paramSet == idMask: - path.WriteString("/") - path.WriteString("_render") - path.WriteString("/") - path.WriteString("template") - path.WriteString("/") - - if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.RecordPathPart(ctx, "id", r.id) - } - path.WriteString(r.id) - method = http.MethodPost } @@ -311,17 +296,6 @@ func (r *RenderSearchTemplate) Header(key, value string) *RenderSearchTemplate { return r } -// Id ID of the search template to render. -// If no `source` is specified, this or the `id` request body parameter is -// required. -// API Name: id -func (r *RenderSearchTemplate) Id(id string) *RenderSearchTemplate { - r.paramSet |= idMask - r.id = id - - return r -} - // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace @@ -368,29 +342,75 @@ func (r *RenderSearchTemplate) Pretty(pretty bool) *RenderSearchTemplate { // API name: file func (r *RenderSearchTemplate) File(file string) *RenderSearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.File = &file return r } -// Params Key-value pairs used to replace Mustache variables in the template. +// The ID of the search template to render. +// If no `source` is specified, this or the `` request path +// parameter is required. +// If you specify both this parameter and the `` parameter, the API +// uses only ``. +// API name: id +func (r *RenderSearchTemplate) Id(id string) *RenderSearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Id = &id + + return r +} + +// Key-value pairs used to replace Mustache variables in the template. // The key is the variable name. // The value is the variable value. // API name: params func (r *RenderSearchTemplate) Params(params map[string]json.RawMessage) *RenderSearchTemplate { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Params = params + return r +} +func (r *RenderSearchTemplate) AddParam(key string, value json.RawMessage) *RenderSearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Params == nil { + r.req.Params = make(map[string]json.RawMessage) + } else { + tmp = r.req.Params + } + + tmp[key] = value + + r.req.Params = tmp return r } -// Source An inline search template. -// Supports the same parameters as the search API's request body. +// An inline search template. +// It supports the same parameters as the search API's request body. // These parameters also support Mustache variables. 
// If no `id` or `` is specified, this parameter is required. // API name: source func (r *RenderSearchTemplate) Source(source string) *RenderSearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Source = &source diff --git a/typedapi/core/rendersearchtemplate/request.go b/typedapi/core/rendersearchtemplate/request.go index d33b139c2a..ee1ae7123d 100644 --- a/typedapi/core/rendersearchtemplate/request.go +++ b/typedapi/core/rendersearchtemplate/request.go @@ -16,26 +16,36 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package rendersearchtemplate import ( + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" ) // Request holds the request body struct for the package rendersearchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/render_search_template/RenderSearchTemplateRequest.ts#L25-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/render_search_template/RenderSearchTemplateRequest.ts#L25-L76 type Request struct { File *string `json:"file,omitempty"` + // Id The ID of the search template to render. + // If no `source` is specified, this or the `` request path + // parameter is required. + // If you specify both this parameter and the `` parameter, the API + // uses only ``. + Id *string `json:"id,omitempty"` // Params Key-value pairs used to replace Mustache variables in the template. // The key is the variable name. // The value is the variable value. Params map[string]json.RawMessage `json:"params,omitempty"` // Source An inline search template. 
- // Supports the same parameters as the search API's request body. + // It supports the same parameters as the search API's request body. // These parameters also support Mustache variables. // If no `id` or `` is specified, this parameter is required. Source *string `json:"source,omitempty"` @@ -61,3 +71,59 @@ func (r *Request) FromJSON(data string) (*Request, error) { return &req, nil } + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "file": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "File", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.File = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = &o + + } + } + return nil +} diff --git a/typedapi/core/rendersearchtemplate/response.go b/typedapi/core/rendersearchtemplate/response.go index 19a11a5265..4cac1637d3 100644 --- a/typedapi/core/rendersearchtemplate/response.go +++ b/typedapi/core/rendersearchtemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package rendersearchtemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package rendersearchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/render_search_template/RenderSearchTemplateResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/render_search_template/RenderSearchTemplateResponse.ts#L23-L25 type Response struct { TemplateOutput map[string]json.RawMessage `json:"template_output"` } diff --git a/typedapi/core/scriptspainlessexecute/request.go b/typedapi/core/scriptspainlessexecute/request.go index 8e068fb7be..fb15d0bc1a 100644 --- a/typedapi/core/scriptspainlessexecute/request.go +++ b/typedapi/core/scriptspainlessexecute/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package scriptspainlessexecute @@ -25,18 +25,22 @@ import ( "fmt" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/painlesscontext" ) // Request holds the request body struct for the package scriptspainlessexecute // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/scripts_painless_execute/ExecutePainlessScriptRequest.ts#L24-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/scripts_painless_execute/ExecutePainlessScriptRequest.ts#L24-L64 type Request struct { // Context The context that the script should run in. - Context *string `json:"context,omitempty"` + // NOTE: Result ordering in the field contexts is not guaranteed. + Context *painlesscontext.PainlessContext `json:"context,omitempty"` // ContextSetup Additional parameters for the `context`. + // NOTE: This parameter is required for all contexts except `painless_test`, + // which is the default if no value is provided for `context`. ContextSetup *types.PainlessContextSetup `json:"context_setup,omitempty"` - // Script The Painless script to execute. + // Script The Painless script to run. Script *types.Script `json:"script,omitempty"` } diff --git a/typedapi/core/scriptspainlessexecute/response.go b/typedapi/core/scriptspainlessexecute/response.go index efdab61c51..b20d5f0993 100644 --- a/typedapi/core/scriptspainlessexecute/response.go +++ b/typedapi/core/scriptspainlessexecute/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package scriptspainlessexecute @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package scriptspainlessexecute // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/scripts_painless_execute/ExecutePainlessScriptResponse.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/scripts_painless_execute/ExecutePainlessScriptResponse.ts#L20-L24 type Response struct { Result json.RawMessage `json:"result,omitempty"` } diff --git a/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go b/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go index 4a9b5994a6..c2fb16a269 100644 --- a/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go +++ b/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go @@ -16,10 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Run a script. +// // Runs a script and returns a result. +// Use this API to build and test scripts, such as when defining a script for a +// runtime field. +// This API requires very few dependencies and is especially useful if you don't +// have permissions to write documents on a cluster. +// +// The API uses several _contexts_, which control how scripts are run, what +// variables are available at runtime, and what the return type is. 
+// +// Each context requires a script, but additional parameters depend on the +// context you're using for that script. package scriptspainlessexecute import ( @@ -36,6 +47,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/painlesscontext" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -75,7 +87,18 @@ func NewScriptsPainlessExecuteFunc(tp elastictransport.Interface) NewScriptsPain } // Run a script. +// // Runs a script and returns a result. +// Use this API to build and test scripts, such as when defining a script for a +// runtime field. +// This API requires very few dependencies and is especially useful if you don't +// have permissions to write documents on a cluster. +// +// The API uses several _contexts_, which control how scripts are run, what +// variables are available at runtime, and what the return type is. +// +// Each context requires a script, but additional parameters depend on the +// context you're using for that script. // // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html func New(tp elastictransport.Interface) *ScriptsPainlessExecute { @@ -85,8 +108,6 @@ func New(tp elastictransport.Interface) *ScriptsPainlessExecute { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -340,29 +361,42 @@ func (r *ScriptsPainlessExecute) Pretty(pretty bool) *ScriptsPainlessExecute { return r } -// Context The context that the script should run in. +// The context that the script should run in. +// NOTE: Result ordering in the field contexts is not guaranteed. 
// API name: context -func (r *ScriptsPainlessExecute) Context(context string) *ScriptsPainlessExecute { - +func (r *ScriptsPainlessExecute) Context(context painlesscontext.PainlessContext) *ScriptsPainlessExecute { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Context = &context - return r } -// ContextSetup Additional parameters for the `context`. +// Additional parameters for the `context`. +// NOTE: This parameter is required for all contexts except `painless_test`, +// which is the default if no value is provided for `context`. // API name: context_setup -func (r *ScriptsPainlessExecute) ContextSetup(contextsetup *types.PainlessContextSetup) *ScriptsPainlessExecute { +func (r *ScriptsPainlessExecute) ContextSetup(contextsetup types.PainlessContextSetupVariant) *ScriptsPainlessExecute { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ContextSetup = contextsetup + r.req.ContextSetup = contextsetup.PainlessContextSetupCaster() return r } -// Script The Painless script to execute. +// The Painless script to run. // API name: script -func (r *ScriptsPainlessExecute) Script(script *types.Script) *ScriptsPainlessExecute { +func (r *ScriptsPainlessExecute) Script(script types.ScriptVariant) *ScriptsPainlessExecute { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Script = script + r.req.Script = script.ScriptCaster() return r } diff --git a/typedapi/core/scroll/request.go b/typedapi/core/scroll/request.go index 5370f5c2f0..ff92b3f4a0 100644 --- a/typedapi/core/scroll/request.go +++ b/typedapi/core/scroll/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package scroll @@ -32,12 +32,12 @@ import ( // Request holds the request body struct for the package scroll // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/scroll/ScrollRequest.ts#L24-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/scroll/ScrollRequest.ts#L24-L88 type Request struct { - // Scroll Period to retain the search context for scrolling. + // Scroll The period to retain the search context for scrolling. Scroll types.Duration `json:"scroll,omitempty"` - // ScrollId Scroll ID of the search. + // ScrollId The scroll ID of the search. ScrollId string `json:"scroll_id"` } diff --git a/typedapi/core/scroll/response.go b/typedapi/core/scroll/response.go index dfa0febd0b..8a5102438c 100644 --- a/typedapi/core/scroll/response.go +++ b/typedapi/core/scroll/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package scroll @@ -34,22 +34,46 @@ import ( // Response holds the response body struct for the package scroll // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/scroll/ScrollResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/scroll/ScrollResponse.ts#L22-L24 type Response struct { - Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` - Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Hits types.HitsMetadata `json:"hits"` - MaxScore *types.Float64 `json:"max_score,omitempty"` - NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` - PitId *string `json:"pit_id,omitempty"` - Profile *types.Profile `json:"profile,omitempty"` - ScrollId_ *string `json:"_scroll_id,omitempty"` + Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` + Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Hits The returned documents and metadata. + Hits types.HitsMetadata `json:"hits"` + MaxScore *types.Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *types.Profile `json:"profile,omitempty"` + // ScrollId_ The identifier for the search and its search context. + // You can use this scroll ID with the scroll API to retrieve the next batch of + // search results for the request. + // This property is returned only if the `scroll` query parameter is specified + // in the request. 
+ ScrollId_ *string `json:"_scroll_id,omitempty"` + // Shards_ A count of shards used for the request. Shards_ types.ShardStatistics `json:"_shards"` Suggest map[string][]types.Suggest `json:"suggest,omitempty"` TerminatedEarly *bool `json:"terminated_early,omitempty"` - TimedOut bool `json:"timed_out"` - Took int64 `json:"took"` + // TimedOut If `true`, the request timed out before completion; returned results may be + // partial or empty. + TimedOut bool `json:"timed_out"` + // Took The number of milliseconds it took Elasticsearch to run the request. + // This value is calculated by measuring the time elapsed between receipt of a + // request on the coordinating node and the time at which the coordinating node + // is ready to send the response. + // It includes: + // + // * Communication time between the coordinating node and data nodes + // * Time the request spends in the search thread pool, queued for execution + // * Actual run time + // + // It does not include: + // + // * Time needed to send the request to Elasticsearch + // * Time needed to serialize the JSON response + // * Time needed to send the response to a client + Took int64 `json:"took"` } // NewResponse returns a Response @@ -504,6 +528,13 @@ func (s *Response) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := types.NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := types.NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { diff --git a/typedapi/core/scroll/scroll.go b/typedapi/core/scroll/scroll.go index 3569e24b8a..6847f672f3 100644 --- a/typedapi/core/scroll/scroll.go +++ b/typedapi/core/scroll/scroll.go @@ -16,9 +16,34 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Allows to retrieve a large numbers of results from a single search request. +// Run a scrolling search. +// +// IMPORTANT: The scroll API is no longer recommend for deep pagination. If you +// need to preserve the index state while paging through more than 10,000 hits, +// use the `search_after` parameter with a point in time (PIT). +// +// The scroll API gets large sets of results from a single scrolling search +// request. +// To get the necessary scroll ID, submit a search API request that includes an +// argument for the `scroll` query parameter. +// The `scroll` parameter indicates how long Elasticsearch should retain the +// search context for the request. +// The search response returns a scroll ID in the `_scroll_id` response body +// parameter. +// You can then use the scroll ID with the scroll API to retrieve the next batch +// of results for the request. +// If the Elasticsearch security features are enabled, the access to the results +// of a specific scroll ID is restricted to the user or API key that submitted +// the search. +// +// You can also use the scroll API to specify a new scroll parameter that +// extends or shortens the retention period for the search context. +// +// IMPORTANT: Results from a scrolling search reflect the state of the index at +// the time of the initial search request. Subsequent indexing or document +// changes only affect later search and scroll requests. package scroll import ( @@ -75,9 +100,34 @@ func NewScrollFunc(tp elastictransport.Interface) NewScroll { } } -// Allows to retrieve a large numbers of results from a single search request. +// Run a scrolling search. +// +// IMPORTANT: The scroll API is no longer recommend for deep pagination. 
If you +// need to preserve the index state while paging through more than 10,000 hits, +// use the `search_after` parameter with a point in time (PIT). +// +// The scroll API gets large sets of results from a single scrolling search +// request. +// To get the necessary scroll ID, submit a search API request that includes an +// argument for the `scroll` query parameter. +// The `scroll` parameter indicates how long Elasticsearch should retain the +// search context for the request. +// The search response returns a scroll ID in the `_scroll_id` response body +// parameter. +// You can then use the scroll ID with the scroll API to retrieve the next batch +// of results for the request. +// If the Elasticsearch security features are enabled, the access to the results +// of a specific scroll ID is restricted to the user or API key that submitted +// the search. +// +// You can also use the scroll API to specify a new scroll parameter that +// extends or shortens the retention period for the search context. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html#request-body-search-scroll +// IMPORTANT: Results from a scrolling search reflect the state of the index at +// the time of the initial search request. Subsequent indexing or document +// changes only affect later search and scroll requests. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/scroll-api.html func New(tp elastictransport.Interface) *Scroll { r := &Scroll{ transport: tp, @@ -85,8 +135,6 @@ func New(tp elastictransport.Interface) *Scroll { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -347,17 +395,27 @@ func (r *Scroll) Pretty(pretty bool) *Scroll { return r } -// Scroll Period to retain the search context for scrolling. +// The period to retain the search context for scrolling. 
// API name: scroll -func (r *Scroll) Scroll(duration types.Duration) *Scroll { - r.req.Scroll = duration +func (r *Scroll) Scroll(duration types.DurationVariant) *Scroll { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Scroll = *duration.DurationCaster() return r } -// ScrollId Scroll ID of the search. +// The scroll ID of the search. // API name: scroll_id func (r *Scroll) ScrollId(scrollid string) *Scroll { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ScrollId = scrollid return r diff --git a/typedapi/core/search/request.go b/typedapi/core/search/request.go index 9bf0eae0c3..32362abd22 100644 --- a/typedapi/core/search/request.go +++ b/typedapi/core/search/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package search @@ -33,28 +33,27 @@ import ( // Request holds the request body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/SearchRequest.ts#L54-L530 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/SearchRequest.ts#L54-L590 type Request struct { // Aggregations Defines the aggregations that are run as part of the search request. Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` // Collapse Collapses search results the values of the specified field. Collapse *types.FieldCollapse `json:"collapse,omitempty"` - // DocvalueFields Array of wildcard (`*`) patterns. + // DocvalueFields An array of wildcard (`*`) field patterns. 
// The request returns doc values for field names matching these patterns in the // `hits.fields` property of the response. DocvalueFields []types.FieldAndFormat `json:"docvalue_fields,omitempty"` - // Explain If true, returns detailed information about score computation as part of a - // hit. + // Explain If `true`, the request returns detailed information about score computation + // as part of a hit. Explain *bool `json:"explain,omitempty"` // Ext Configuration of search extensions defined by Elasticsearch plugins. Ext map[string]json.RawMessage `json:"ext,omitempty"` - // Fields Array of wildcard (`*`) patterns. + // Fields An array of wildcard (`*`) field patterns. // The request returns values for field names matching these patterns in the // `hits.fields` property of the response. Fields []types.FieldAndFormat `json:"fields,omitempty"` - // From Starting document offset. - // Needs to be non-negative. + // From The starting document offset, which must be non-negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. @@ -62,14 +61,17 @@ type Request struct { // Highlight Specifies the highlighter to use for retrieving highlighted snippets from one // or more fields in your search results. Highlight *types.Highlight `json:"highlight,omitempty"` - // IndicesBoost Boosts the _score of documents from specified indices. + // IndicesBoost Boost the `_score` of documents from specified indices. + // The boost value is the factor by which scores are multiplied. + // A boost value greater than `1.0` increases the score. + // A boost value between `0` and `1.0` decreases the score. IndicesBoost []map[string]types.Float64 `json:"indices_boost,omitempty"` - // Knn Defines the approximate kNN search to run. + // Knn The approximate kNN search to run. Knn []types.KnnSearch `json:"knn,omitempty"` - // MinScore Minimum `_score` for matching documents. 
+ // MinScore The minimum `_score` for matching documents. // Documents with a lower `_score` are not included in the search results. MinScore *types.Float64 `json:"min_score,omitempty"` - // Pit Limits the search to a point in time (PIT). + // Pit Limit the search to a point in time (PIT). // If you provide a PIT, you cannot specify an `` in the request path. Pit *types.PointInTimeReference `json:"pit,omitempty"` // PostFilter Use the `post_filter` parameter to filter search results. @@ -81,18 +83,19 @@ type Request struct { // NOTE: This is a debugging tool and adds significant overhead to search // execution. Profile *bool `json:"profile,omitempty"` - // Query Defines the search definition using the Query DSL. + // Query The search definition using the Query DSL. Query *types.Query `json:"query,omitempty"` - // Rank Defines the Reciprocal Rank Fusion (RRF) to use. + // Rank The Reciprocal Rank Fusion (RRF) to use. Rank *types.RankContainer `json:"rank,omitempty"` // Rescore Can be used to improve precision by reordering just the top (for example 100 // - 500) documents returned by the `query` and `post_filter` phases. Rescore []types.Rescore `json:"rescore,omitempty"` // Retriever A retriever is a specification to describe top documents returned from a - // search. A retriever replaces other elements of the search API that also - // return top documents such as query and knn. + // search. + // A retriever replaces other elements of the search API that also return top + // documents such as `query` and `knn`. Retriever *types.RetrieverContainer `json:"retriever,omitempty"` - // RuntimeMappings Defines one or more runtime fields in the search request. + // RuntimeMappings One or more runtime fields in the search request. // These fields take precedence over mapped fields with the same name. RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` // ScriptFields Retrieve a script evaluation (based on different fields) for each hit. 
@@ -100,52 +103,57 @@ type Request struct { // SearchAfter Used to retrieve the next page of hits using a set of sort values from the // previous page. SearchAfter []types.FieldValue `json:"search_after,omitempty"` - // SeqNoPrimaryTerm If `true`, returns sequence number and primary term of the last modification - // of each hit. + // SeqNoPrimaryTerm If `true`, the request returns sequence number and primary term of the last + // modification of each hit. SeqNoPrimaryTerm *bool `json:"seq_no_primary_term,omitempty"` - // Size The number of hits to return. + // Size The number of hits to return, which must not be negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. - // To page through more hits, use the `search_after` parameter. + // To page through more hits, use the `search_after` property. Size *int `json:"size,omitempty"` - // Slice Can be used to split a scrolled search into multiple slices that can be - // consumed independently. + // Slice Split a scrolled search into multiple slices that can be consumed + // independently. Slice *types.SlicedScroll `json:"slice,omitempty"` // Sort A comma-separated list of : pairs. Sort []types.SortCombinations `json:"sort,omitempty"` - // Source_ Indicates which source fields are returned for matching documents. - // These fields are returned in the hits._source property of the search + // Source_ The source fields that are returned for matching documents. + // These fields are returned in the `hits._source` property of the search // response. + // If the `stored_fields` property is specified, the `_source` property defaults + // to `false`. + // Otherwise, it defaults to `true`. Source_ types.SourceConfig `json:"_source,omitempty"` - // Stats Stats groups to associate with the search. + // Stats The stats groups to associate with the search. // Each group maintains a statistics aggregation for its associated searches. 
// You can retrieve these stats using the indices stats API. Stats []string `json:"stats,omitempty"` - // StoredFields List of stored fields to return as part of a hit. + // StoredFields A comma-separated list of stored fields to return as part of a hit. // If no fields are specified, no stored fields are included in the response. - // If this field is specified, the `_source` parameter defaults to `false`. + // If this field is specified, the `_source` property defaults to `false`. // You can pass `_source: true` to return both source fields and stored fields // in the search response. StoredFields []string `json:"stored_fields,omitempty"` // Suggest Defines a suggester that provides similar looking terms based on a provided // text. Suggest *types.Suggester `json:"suggest,omitempty"` - // TerminateAfter Maximum number of documents to collect for each shard. + // TerminateAfter The maximum number of documents to collect for each shard. // If a query reaches this limit, Elasticsearch terminates the query early. // Elasticsearch collects documents before sorting. - // Use with caution. - // Elasticsearch applies this parameter to each shard handling the request. + // + // IMPORTANT: Use with caution. + // Elasticsearch applies this property to each shard handling the request. // When possible, let Elasticsearch perform early termination automatically. - // Avoid specifying this parameter for requests that target data streams with + // Avoid specifying this property for requests that target data streams with // backing indices across multiple data tiers. + // // If set to `0` (default), the query does not terminate early. TerminateAfter *int64 `json:"terminate_after,omitempty"` - // Timeout Specifies the period of time to wait for a response from each shard. + // Timeout The period of time to wait for a response from each shard. // If no response is received before the timeout expires, the request fails and // returns an error. // Defaults to no timeout. 
Timeout *string `json:"timeout,omitempty"` - // TrackScores If true, calculate and return document scores, even if the scores are not + // TrackScores If `true`, calculate and return document scores, even if the scores are not // used for sorting. TrackScores *bool `json:"track_scores,omitempty"` // TrackTotalHits Number of hits matching the query to count accurately. @@ -154,7 +162,7 @@ type Request struct { // If `false`, the response does not include the total number of hits matching // the query. TrackTotalHits types.TrackHits `json:"track_total_hits,omitempty"` - // Version If true, returns document version as part of a hit. + // Version If `true`, the request returns the document version as part of a hit. Version *bool `json:"version,omitempty"` } diff --git a/typedapi/core/search/response.go b/typedapi/core/search/response.go index 25f2df5dce..c434a08d93 100644 --- a/typedapi/core/search/response.go +++ b/typedapi/core/search/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package search @@ -34,22 +34,46 @@ import ( // Response holds the response body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/SearchResponse.ts#L34-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/SearchResponse.ts#L34-L36 type Response struct { - Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` - Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Hits types.HitsMetadata `json:"hits"` - MaxScore *types.Float64 `json:"max_score,omitempty"` - NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` - PitId *string `json:"pit_id,omitempty"` - Profile *types.Profile `json:"profile,omitempty"` - ScrollId_ *string `json:"_scroll_id,omitempty"` + Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` + Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Hits The returned documents and metadata. + Hits types.HitsMetadata `json:"hits"` + MaxScore *types.Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *types.Profile `json:"profile,omitempty"` + // ScrollId_ The identifier for the search and its search context. + // You can use this scroll ID with the scroll API to retrieve the next batch of + // search results for the request. + // This property is returned only if the `scroll` query parameter is specified + // in the request. 
+ ScrollId_ *string `json:"_scroll_id,omitempty"` + // Shards_ A count of shards used for the request. Shards_ types.ShardStatistics `json:"_shards"` Suggest map[string][]types.Suggest `json:"suggest,omitempty"` TerminatedEarly *bool `json:"terminated_early,omitempty"` - TimedOut bool `json:"timed_out"` - Took int64 `json:"took"` + // TimedOut If `true`, the request timed out before completion; returned results may be + // partial or empty. + TimedOut bool `json:"timed_out"` + // Took The number of milliseconds it took Elasticsearch to run the request. + // This value is calculated by measuring the time elapsed between receipt of a + // request on the coordinating node and the time at which the coordinating node + // is ready to send the response. + // It includes: + // + // * Communication time between the coordinating node and data nodes + // * Time the request spends in the search thread pool, queued for execution + // * Actual run time + // + // It does not include: + // + // * Time needed to send the request to Elasticsearch + // * Time needed to serialize the JSON response + // * Time needed to send the response to a client + Took int64 `json:"took"` } // NewResponse returns a Response @@ -504,6 +528,13 @@ func (s *Response) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := types.NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := types.NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { diff --git a/typedapi/core/search/search.go b/typedapi/core/search/search.go index 66a42628ff..26572d2f30 100644 --- a/typedapi/core/search/search.go +++ b/typedapi/core/search/search.go @@ -16,12 +16,40 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns search hits that match the query defined in the request. +// Run a search. +// +// Get search hits that match the query defined in the request. // You can provide search queries using the `q` query string parameter or the // request body. // If both are specified, only the query parameter is used. +// +// If the Elasticsearch security features are enabled, you must have the read +// index privilege for the target data stream, index, or alias. For +// cross-cluster search, refer to the documentation about configuring CCS +// privileges. +// To search a point in time (PIT) for an alias, you must have the `read` index +// privilege for the alias's data streams or indices. +// +// **Search slicing** +// +// When paging through a large number of documents, it can be helpful to split +// the search into multiple slices to consume them independently with the +// `slice` and `pit` properties. +// By default the splitting is done first on the shards, then locally on each +// shard. +// The local splitting partitions the shard into contiguous ranges based on +// Lucene document IDs. +// +// For instance if the number of shards is equal to 2 and you request 4 slices, +// the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are +// assigned to the second shard. +// +// IMPORTANT: The same point-in-time ID should be used for all slices. +// If different PIT IDs are used, slices can overlap and miss documents. +// This situation can occur because the splitting criterion is based on Lucene +// document IDs, which are not stable across changes to the index. package search import ( @@ -86,11 +114,39 @@ func NewSearchFunc(tp elastictransport.Interface) NewSearch { } } -// Returns search hits that match the query defined in the request. 
+// Run a search. +// +// Get search hits that match the query defined in the request. // You can provide search queries using the `q` query string parameter or the // request body. // If both are specified, only the query parameter is used. // +// If the Elasticsearch security features are enabled, you must have the read +// index privilege for the target data stream, index, or alias. For +// cross-cluster search, refer to the documentation about configuring CCS +// privileges. +// To search a point in time (PIT) for an alias, you must have the `read` index +// privilege for the alias's data streams or indices. +// +// **Search slicing** +// +// When paging through a large number of documents, it can be helpful to split +// the search into multiple slices to consume them independently with the +// `slice` and `pit` properties. +// By default the splitting is done first on the shards, then locally on each +// shard. +// The local splitting partitions the shard into contiguous ranges based on +// Lucene document IDs. +// +// For instance if the number of shards is equal to 2 and you request 4 slices, +// the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are +// assigned to the second shard. +// +// IMPORTANT: The same point-in-time ID should be used for all slices. +// If different PIT IDs are used, slices can overlap and miss documents. +// This situation can occur because the splitting criterion is based on Lucene +// document IDs, which are not stable across changes to the index. 
+// // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html func New(tp elastictransport.Interface) *Search { r := &Search{ @@ -99,8 +155,6 @@ func New(tp elastictransport.Interface) *Search { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -319,8 +373,8 @@ func (r *Search) Header(key, value string) *Search { return r } -// Index Comma-separated list of data streams, indices, and aliases to search. -// Supports wildcards (`*`). +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). // To search all data streams and indices, omit this parameter or use `*` or // `_all`. // API Name: index @@ -343,8 +397,12 @@ func (r *Search) AllowNoIndices(allownoindices bool) *Search { return r } -// AllowPartialSearchResults If true, returns partial results if there are shard request timeouts or shard -// failures. If false, returns an error with no partial results. +// AllowPartialSearchResults If `true` and there are shard request timeouts or shard failures, the request +// returns partial results. +// If `false`, it returns an error with no partial results. +// +// To override the default behavior, you can set the +// `search.default_allow_partial_results` cluster setting to `false`. // API name: allow_partial_search_results func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Search { r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults)) @@ -352,8 +410,8 @@ func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Sear return r } -// Analyzer Analyzer to use for the query string. -// This parameter can only be used when the q query string parameter is +// Analyzer The analyzer to use for the query string. +// This parameter can be used only when the `q` query string parameter is // specified. 
// API name: analyzer func (r *Search) Analyzer(analyzer string) *Search { @@ -362,8 +420,8 @@ func (r *Search) Analyzer(analyzer string) *Search { return r } -// AnalyzeWildcard If true, wildcard and prefix queries are analyzed. -// This parameter can only be used when the q query string parameter is +// AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. +// This parameter can be used only when the `q` query string parameter is // specified. // API name: analyze_wildcard func (r *Search) AnalyzeWildcard(analyzewildcard bool) *Search { @@ -374,9 +432,9 @@ func (r *Search) AnalyzeWildcard(analyzewildcard bool) *Search { // BatchedReduceSize The number of shard results that should be reduced at once on the // coordinating node. -// This value should be used as a protection mechanism to reduce the memory -// overhead per search request if the potential number of shards in the request -// can be large. +// If the potential number of shards in the request can be large, this value +// should be used as a protection mechanism to reduce the memory overhead per +// search request. // API name: batched_reduce_size func (r *Search) BatchedReduceSize(batchedreducesize string) *Search { r.values.Set("batched_reduce_size", batchedreducesize) @@ -384,8 +442,8 @@ func (r *Search) BatchedReduceSize(batchedreducesize string) *Search { return r } -// CcsMinimizeRoundtrips If true, network round-trips between the coordinating node and the remote -// clusters are minimized when executing cross-cluster search (CCS) requests. +// CcsMinimizeRoundtrips If `true`, network round-trips between the coordinating node and the remote +// clusters are minimized when running cross-cluster search (CCS) requests. 
// API name: ccs_minimize_roundtrips func (r *Search) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Search { r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) @@ -393,8 +451,8 @@ func (r *Search) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Search { return r } -// DefaultOperator The default operator for query string query: AND or OR. -// This parameter can only be used when the `q` query string parameter is +// DefaultOperator The default operator for the query string query: `AND` or `OR`. +// This parameter can be used only when the `q` query string parameter is // specified. // API name: default_operator func (r *Search) DefaultOperator(defaultoperator operator.Operator) *Search { @@ -403,8 +461,9 @@ func (r *Search) DefaultOperator(defaultoperator operator.Operator) *Search { return r } -// Df Field to use as default where no field prefix is given in the query string. -// This parameter can only be used when the q query string parameter is +// Df The field to use as a default when no field prefix is given in the query +// string. +// This parameter can be used only when the `q` query string parameter is // specified. // API name: df func (r *Search) Df(df string) *Search { @@ -413,10 +472,10 @@ func (r *Search) Df(df string) *Search { return r } -// ExpandWildcards Type of index that wildcard patterns can match. +// ExpandWildcards The type of index that wildcard patterns can match. // If the request can target data streams, this argument determines whether // wildcard expressions match hidden data streams. -// Supports comma-separated values, such as `open,hidden`. +// It supports comma-separated values such as `open,hidden`. 
// API name: expand_wildcards func (r *Search) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Search { tmp := []string{} @@ -445,10 +504,9 @@ func (r *Search) IgnoreUnavailable(ignoreunavailable bool) *Search { return r } -// IncludeNamedQueriesScore Indicates whether hit.matched_queries should be rendered as a map that -// includes -// the name of the matched query associated with its score (true) -// or as an array containing the name of the matched queries (false) +// IncludeNamedQueriesScore If `true`, the response includes the score contribution from any named +// queries. +// // This functionality reruns each named query on every hit in a search response. // Typically, this adds a small overhead to a request. // However, using computationally expensive named queries on a large number of @@ -462,7 +520,7 @@ func (r *Search) IncludeNamedQueriesScore(includenamedqueriesscore bool) *Search // Lenient If `true`, format-based query failures (such as providing text to a numeric // field) in the query string will be ignored. -// This parameter can only be used when the `q` query string parameter is +// This parameter can be used only when the `q` query string parameter is // specified. // API name: lenient func (r *Search) Lenient(lenient bool) *Search { @@ -471,7 +529,7 @@ func (r *Search) Lenient(lenient bool) *Search { return r } -// MaxConcurrentShardRequests Defines the number of concurrent shard requests per node this search executes +// MaxConcurrentShardRequests The number of concurrent shard requests per node that the search runs // concurrently. // This value should be used to limit the impact of the search on the cluster in // order to limit the number of concurrent shard requests. @@ -491,23 +549,25 @@ func (r *Search) MinCompatibleShardNode(versionstring string) *Search { return r } -// Preference Nodes and shards used for the search. +// Preference The nodes and shards used for the search. 
// By default, Elasticsearch selects from eligible nodes and shards using -// adaptive replica selection, accounting for allocation awareness. Valid values -// are: -// `_only_local` to run the search only on shards on the local node; -// `_local` to, if possible, run the search on shards on the local node, or if +// adaptive replica selection, accounting for allocation awareness. +// Valid values are: +// +// * `_only_local` to run the search only on shards on the local node; +// * `_local` to, if possible, run the search on shards on the local node, or if // not, select shards using the default method; -// `_only_nodes:,` to run the search on only the specified +// * `_only_nodes:,` to run the search on only the specified // nodes IDs, where, if suitable shards exist on more than one selected node, // use shards on those nodes using the default method, or if none of the // specified nodes are available, select shards from any available node using // the default method; -// `_prefer_nodes:,` to if possible, run the search on the +// * `_prefer_nodes:,` to if possible, run the search on the // specified nodes IDs, or if not, select shards using the default method; -// `_shards:,` to run the search only on the specified shards; -// `` (any string that does not start with `_`) to route searches -// with the same `` to the same shards in the same order. +// * `_shards:,` to run the search only on the specified shards; +// * `` (any string that does not start with `_`) to route +// searches with the same `` to the same shards in the same +// order. // API name: preference func (r *Search) Preference(preference string) *Search { r.values.Set("preference", preference) @@ -515,18 +575,19 @@ func (r *Search) Preference(preference string) *Search { return r } -// PreFilterShardSize Defines a threshold that enforces a pre-filter roundtrip to prefilter search -// shards based on query rewriting if the number of shards the search request -// expands to exceeds the threshold. 
+// PreFilterShardSize A threshold that enforces a pre-filter roundtrip to prefilter search shards +// based on query rewriting if the number of shards the search request expands +// to exceeds the threshold. // This filter roundtrip can limit the number of shards significantly if for // instance a shard can not match any documents based on its rewrite method (if // date filters are mandatory to match but the shard bounds and the query are // disjoint). // When unspecified, the pre-filter phase is executed if any of these conditions // is met: -// the request targets more than 128 shards; -// the request targets one or more read-only index; -// the primary sort of the query targets an indexed field. +// +// * The request targets more than 128 shards. +// * The request targets one or more read-only index. +// * The primary sort of the query targets an indexed field. // API name: pre_filter_shard_size func (r *Search) PreFilterShardSize(prefiltershardsize string) *Search { r.values.Set("pre_filter_shard_size", prefiltershardsize) @@ -536,7 +597,7 @@ func (r *Search) PreFilterShardSize(prefiltershardsize string) *Search { // RequestCache If `true`, the caching of search results is enabled for requests where `size` // is `0`. -// Defaults to index level settings. +// It defaults to index level settings. // API name: request_cache func (r *Search) RequestCache(requestcache bool) *Search { r.values.Set("request_cache", strconv.FormatBool(requestcache)) @@ -544,7 +605,7 @@ func (r *Search) RequestCache(requestcache bool) *Search { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value that is used to route operations to a specific shard. // API name: routing func (r *Search) Routing(routing string) *Search { r.values.Set("routing", routing) @@ -552,9 +613,9 @@ func (r *Search) Routing(routing string) *Search { return r } -// Scroll Period to retain the search context for scrolling. See Scroll search results. 
+// Scroll The period to retain the search context for scrolling. // By default, this value cannot exceed `1d` (24 hours). -// You can change this limit using the `search.max_keep_alive` cluster-level +// You can change this limit by using the `search.max_keep_alive` cluster-level // setting. // API name: scroll func (r *Search) Scroll(duration string) *Search { @@ -563,7 +624,8 @@ func (r *Search) Scroll(duration string) *Search { return r } -// SearchType How distributed term frequencies are calculated for relevance scoring. +// SearchType Indicates how distributed term frequencies are calculated for relevance +// scoring. // API name: search_type func (r *Search) SearchType(searchtype searchtype.SearchType) *Search { r.values.Set("search_type", searchtype.String()) @@ -571,7 +633,7 @@ func (r *Search) SearchType(searchtype searchtype.SearchType) *Search { return r } -// SuggestField Specifies which field to use for suggestions. +// SuggestField The field to use for suggestions. // API name: suggest_field func (r *Search) SuggestField(field string) *Search { r.values.Set("suggest_field", field) @@ -579,8 +641,8 @@ func (r *Search) SuggestField(field string) *Search { return r } -// SuggestMode Specifies the suggest mode. -// This parameter can only be used when the `suggest_field` and `suggest_text` +// SuggestMode The suggest mode. +// This parameter can be used only when the `suggest_field` and `suggest_text` // query string parameters are specified. // API name: suggest_mode func (r *Search) SuggestMode(suggestmode suggestmode.SuggestMode) *Search { @@ -589,8 +651,8 @@ func (r *Search) SuggestMode(suggestmode suggestmode.SuggestMode) *Search { return r } -// SuggestSize Number of suggestions to return. -// This parameter can only be used when the `suggest_field` and `suggest_text` +// SuggestSize The number of suggestions to return. +// This parameter can be used only when the `suggest_field` and `suggest_text` // query string parameters are specified. 
// API name: suggest_size func (r *Search) SuggestSize(suggestsize string) *Search { @@ -600,7 +662,7 @@ func (r *Search) SuggestSize(suggestsize string) *Search { } // SuggestText The source text for which the suggestions should be returned. -// This parameter can only be used when the `suggest_field` and `suggest_text` +// This parameter can be used only when the `suggest_field` and `suggest_text` // query string parameters are specified. // API name: suggest_text func (r *Search) SuggestText(suggesttext string) *Search { @@ -650,9 +712,13 @@ func (r *Search) SourceIncludes_(fields ...string) *Search { return r } -// Q Query in the Lucene query string syntax using query parameter search. +// Q A query in the Lucene query string syntax. // Query parameter searches do not support the full Elasticsearch Query DSL but // are handy for testing. +// +// IMPORTANT: This parameter overrides the query parameter in the request body. +// If both parameters are specified, documents matching the query request body +// parameter are not returned. // API name: q func (r *Search) Q(q string) *Search { r.values.Set("q", q) @@ -716,340 +782,568 @@ func (r *Search) Pretty(pretty bool) *Search { return r } -// Aggregations Defines the aggregations that are run as part of the search request. +// Defines the aggregations that are run as part of the search request. 
// API name: aggregations func (r *Search) Aggregations(aggregations map[string]types.Aggregations) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} + +func (r *Search) AddAggregation(key string, value types.AggregationsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + tmp[key] = *value.AggregationsCaster() + + r.req.Aggregations = tmp return r } -// Collapse Collapses search results the values of the specified field. +// Collapses search results by the values of the specified field. // API name: collapse -func (r *Search) Collapse(collapse *types.FieldCollapse) *Search { +func (r *Search) Collapse(collapse types.FieldCollapseVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Collapse = collapse + r.req.Collapse = collapse.FieldCollapseCaster() return r } -// DocvalueFields Array of wildcard (`*`) patterns. +// An array of wildcard (`*`) field patterns. // The request returns doc values for field names matching these patterns in the // `hits.fields` property of the response. 
// API name: docvalue_fields -func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormat) *Search { - r.req.DocvalueFields = docvaluefields +func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docvaluefields { + + r.req.DocvalueFields = append(r.req.DocvalueFields, *v.FieldAndFormatCaster()) + } return r } -// Explain If true, returns detailed information about score computation as part of a -// hit. +// If `true`, the request returns detailed information about score computation +// as part of a hit. // API name: explain func (r *Search) Explain(explain bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Explain = &explain return r } -// Ext Configuration of search extensions defined by Elasticsearch plugins. +// Configuration of search extensions defined by Elasticsearch plugins. // API name: ext func (r *Search) Ext(ext map[string]json.RawMessage) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Ext = ext + return r +} + +func (r *Search) AddExt(key string, value json.RawMessage) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + var tmp map[string]json.RawMessage + if r.req.Ext == nil { + r.req.Ext = make(map[string]json.RawMessage) + } else { + tmp = r.req.Ext + } + + tmp[key] = value + + r.req.Ext = tmp return r } -// Fields Array of wildcard (`*`) patterns. +// An array of wildcard (`*`) field patterns. // The request returns values for field names matching these patterns in the // `hits.fields` property of the response. 
// API name: fields -func (r *Search) Fields(fields ...types.FieldAndFormat) *Search { - r.req.Fields = fields +func (r *Search) Fields(fields ...types.FieldAndFormatVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range fields { + + r.req.Fields = append(r.req.Fields, *v.FieldAndFormatCaster()) + } return r } -// From Starting document offset. -// Needs to be non-negative. +// The starting document offset, which must be non-negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. // API name: from func (r *Search) From(from int) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } -// Highlight Specifies the highlighter to use for retrieving highlighted snippets from one +// Specifies the highlighter to use for retrieving highlighted snippets from one // or more fields in your search results. // API name: highlight -func (r *Search) Highlight(highlight *types.Highlight) *Search { +func (r *Search) Highlight(highlight types.HighlightVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Highlight = highlight + r.req.Highlight = highlight.HighlightCaster() return r } -// IndicesBoost Boosts the _score of documents from specified indices. +// Boost the `_score` of documents from specified indices. +// The boost value is the factor by which scores are multiplied. +// A boost value greater than `1.0` increases the score. +// A boost value between `0` and `1.0` decreases the score. 
// API name: indices_boost -func (r *Search) IndicesBoost(indicesboosts ...map[string]types.Float64) *Search { - r.req.IndicesBoost = indicesboosts +func (r *Search) IndicesBoost(indicesboost []map[string]types.Float64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndicesBoost = indicesboost return r } -// Knn Defines the approximate kNN search to run. +// The approximate kNN search to run. // API name: knn -func (r *Search) Knn(knns ...types.KnnSearch) *Search { - r.req.Knn = knns +func (r *Search) Knn(knns ...types.KnnSearchVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Knn = make([]types.KnnSearch, len(knns)) + for i, v := range knns { + r.req.Knn[i] = *v.KnnSearchCaster() + } return r } -// MinScore Minimum `_score` for matching documents. +// The minimum `_score` for matching documents. // Documents with a lower `_score` are not included in the search results. // API name: min_score func (r *Search) MinScore(minscore types.Float64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MinScore = &minscore return r } -// Pit Limits the search to a point in time (PIT). +// Limit the search to a point in time (PIT). // If you provide a PIT, you cannot specify an `` in the request path. // API name: pit -func (r *Search) Pit(pit *types.PointInTimeReference) *Search { +func (r *Search) Pit(pit types.PointInTimeReferenceVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pit = pit + r.req.Pit = pit.PointInTimeReferenceCaster() return r } -// PostFilter Use the `post_filter` parameter to filter search results. +// Use the `post_filter` parameter to filter search results. // The search hits are filtered after the aggregations are calculated. 
// A post filter has no impact on the aggregation results. // API name: post_filter -func (r *Search) PostFilter(postfilter *types.Query) *Search { +func (r *Search) PostFilter(postfilter types.QueryVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PostFilter = postfilter + r.req.PostFilter = postfilter.QueryCaster() return r } -// Profile Set to `true` to return detailed timing information about the execution of +// Set to `true` to return detailed timing information about the execution of // individual components in a search request. // NOTE: This is a debugging tool and adds significant overhead to search // execution. // API name: profile func (r *Search) Profile(profile bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Profile = &profile return r } -// Query Defines the search definition using the Query DSL. +// The search definition using the Query DSL. // API name: query -func (r *Search) Query(query *types.Query) *Search { +func (r *Search) Query(query types.QueryVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// Rank Defines the Reciprocal Rank Fusion (RRF) to use. +// The Reciprocal Rank Fusion (RRF) to use. 
// API name: rank -func (r *Search) Rank(rank *types.RankContainer) *Search { +func (r *Search) Rank(rank types.RankContainerVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Rank = rank + r.req.Rank = rank.RankContainerCaster() return r } -// Rescore Can be used to improve precision by reordering just the top (for example 100 +// Can be used to improve precision by reordering just the top (for example 100 // - 500) documents returned by the `query` and `post_filter` phases. // API name: rescore -func (r *Search) Rescore(rescores ...types.Rescore) *Search { - r.req.Rescore = rescores +func (r *Search) Rescore(rescores ...types.RescoreVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Rescore = make([]types.Rescore, len(rescores)) + for i, v := range rescores { + r.req.Rescore[i] = *v.RescoreCaster() + } return r } -// Retriever A retriever is a specification to describe top documents returned from a -// search. A retriever replaces other elements of the search API that also -// return top documents such as query and knn. +// A retriever is a specification to describe top documents returned from a +// search. +// A retriever replaces other elements of the search API that also return top +// documents such as `query` and `knn`. // API name: retriever -func (r *Search) Retriever(retriever *types.RetrieverContainer) *Search { +func (r *Search) Retriever(retriever types.RetrieverContainerVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Retriever = retriever + r.req.Retriever = retriever.RetrieverContainerCaster() return r } -// RuntimeMappings Defines one or more runtime fields in the search request. +// One or more runtime fields in the search request. 
// These fields take precedence over mapped fields with the same name. // API name: runtime_mappings -func (r *Search) RuntimeMappings(runtimefields types.RuntimeFields) *Search { - r.req.RuntimeMappings = runtimefields +func (r *Search) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// ScriptFields Retrieve a script evaluation (based on different fields) for each hit. +// Retrieve a script evaluation (based on different fields) for each hit. // API name: script_fields func (r *Search) ScriptFields(scriptfields map[string]types.ScriptField) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ScriptFields = scriptfields + return r +} +func (r *Search) AddScriptField(key string, value types.ScriptFieldVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ScriptField + if r.req.ScriptFields == nil { + r.req.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = r.req.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + + r.req.ScriptFields = tmp return r } -// SearchAfter Used to retrieve the next page of hits using a set of sort values from the +// Used to retrieve the next page of hits using a set of sort values from the // previous page. 
// API name: search_after -func (r *Search) SearchAfter(sortresults ...types.FieldValue) *Search { - r.req.SearchAfter = sortresults +func (r *Search) SearchAfter(sortresults ...types.FieldValueVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// SeqNoPrimaryTerm If `true`, returns sequence number and primary term of the last modification -// of each hit. +// If `true`, the request returns sequence number and primary term of the last +// modification of each hit. // API name: seq_no_primary_term func (r *Search) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.SeqNoPrimaryTerm = &seqnoprimaryterm return r } -// Size The number of hits to return. +// The number of hits to return, which must not be negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. -// To page through more hits, use the `search_after` parameter. +// To page through more hits, use the `search_after` property. // API name: size func (r *Search) Size(size int) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// Slice Can be used to split a scrolled search into multiple slices that can be -// consumed independently. +// Split a scrolled search into multiple slices that can be consumed +// independently. 
// API name: slice -func (r *Search) Slice(slice *types.SlicedScroll) *Search { +func (r *Search) Slice(slice types.SlicedScrollVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Slice = slice + r.req.Slice = slice.SlicedScrollCaster() return r } -// Sort A comma-separated list of : pairs. +// A comma-separated list of : pairs. // API name: sort -func (r *Search) Sort(sorts ...types.SortCombinations) *Search { - r.req.Sort = sorts +func (r *Search) Sort(sorts ...types.SortCombinationsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } -// Source_ Indicates which source fields are returned for matching documents. -// These fields are returned in the hits._source property of the search +// The source fields that are returned for matching documents. +// These fields are returned in the `hits._source` property of the search // response. +// If the `stored_fields` property is specified, the `_source` property defaults +// to `false`. +// Otherwise, it defaults to `true`. // API name: _source -func (r *Search) Source_(sourceconfig types.SourceConfig) *Search { - r.req.Source_ = sourceconfig +func (r *Search) Source_(sourceconfig types.SourceConfigVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source_ = *sourceconfig.SourceConfigCaster() return r } -// Stats Stats groups to associate with the search. +// The stats groups to associate with the search. // Each group maintains a statistics aggregation for its associated searches. // You can retrieve these stats using the indices stats API. 
// API name: stats func (r *Search) Stats(stats ...string) *Search { - r.req.Stats = stats + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range stats { + + r.req.Stats = append(r.req.Stats, v) + } return r } -// StoredFields List of stored fields to return as part of a hit. +// A comma-separated list of stored fields to return as part of a hit. // If no fields are specified, no stored fields are included in the response. -// If this field is specified, the `_source` parameter defaults to `false`. +// If this field is specified, the `_source` property defaults to `false`. // You can pass `_source: true` to return both source fields and stored fields // in the search response. // API name: stored_fields func (r *Search) StoredFields(fields ...string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.StoredFields = fields return r } -// Suggest Defines a suggester that provides similar looking terms based on a provided +// Defines a suggester that provides similar looking terms based on a provided // text. // API name: suggest -func (r *Search) Suggest(suggest *types.Suggester) *Search { +func (r *Search) Suggest(suggest types.SuggesterVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Suggest = suggest + r.req.Suggest = suggest.SuggesterCaster() return r } -// TerminateAfter Maximum number of documents to collect for each shard. +// The maximum number of documents to collect for each shard. // If a query reaches this limit, Elasticsearch terminates the query early. // Elasticsearch collects documents before sorting. -// Use with caution. -// Elasticsearch applies this parameter to each shard handling the request. +// +// IMPORTANT: Use with caution. +// Elasticsearch applies this property to each shard handling the request. 
// When possible, let Elasticsearch perform early termination automatically. -// Avoid specifying this parameter for requests that target data streams with +// Avoid specifying this property for requests that target data streams with // backing indices across multiple data tiers. +// // If set to `0` (default), the query does not terminate early. // API name: terminate_after func (r *Search) TerminateAfter(terminateafter int64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TerminateAfter = &terminateafter return r } -// Timeout Specifies the period of time to wait for a response from each shard. +// The period of time to wait for a response from each shard. // If no response is received before the timeout expires, the request fails and // returns an error. // Defaults to no timeout. // API name: timeout func (r *Search) Timeout(timeout string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Timeout = &timeout return r } -// TrackScores If true, calculate and return document scores, even if the scores are not +// If `true`, calculate and return document scores, even if the scores are not // used for sorting. // API name: track_scores func (r *Search) TrackScores(trackscores bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TrackScores = &trackscores return r } -// TrackTotalHits Number of hits matching the query to count accurately. +// Number of hits matching the query to count accurately. // If `true`, the exact number of hits is returned at the cost of some // performance. // If `false`, the response does not include the total number of hits matching // the query. 
// API name: track_total_hits -func (r *Search) TrackTotalHits(trackhits types.TrackHits) *Search { - r.req.TrackTotalHits = trackhits +func (r *Search) TrackTotalHits(trackhits types.TrackHitsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackTotalHits = *trackhits.TrackHitsCaster() return r } -// Version If true, returns document version as part of a hit. +// If `true`, the request returns the document version as part of a hit. // API name: version func (r *Search) Version(version bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &version return r diff --git a/typedapi/core/searchmvt/request.go b/typedapi/core/searchmvt/request.go index addc12ae59..2c9769aa1b 100644 --- a/typedapi/core/searchmvt/request.go +++ b/typedapi/core/searchmvt/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package searchmvt @@ -35,65 +35,80 @@ import ( // Request holds the request body struct for the package searchmvt // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search_mvt/SearchMvtRequest.ts#L33-L190 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search_mvt/SearchMvtRequest.ts#L33-L373 type Request struct { // Aggs Sub-aggregations for the geotile_grid. 
// - // Supports the following aggregation types: - // - avg - // - cardinality - // - max - // - min - // - sum + // It supports the following aggregation types: + // + // - `avg` + // - `boxplot` + // - `cardinality` + // - `extended stats` + // - `max` + // - `median absolute deviation` + // - `min` + // - `percentile` + // - `percentile-rank` + // - `stats` + // - `sum` + // - `value count` + // + // The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is + // reserved for internal aggregations. Aggs map[string]types.Aggregations `json:"aggs,omitempty"` - // Buffer Size, in pixels, of a clipping buffer outside the tile. This allows renderers + // Buffer The size, in pixels, of a clipping buffer outside the tile. This allows + // renderers // to avoid outline artifacts from geometries that extend past the extent of the // tile. Buffer *int `json:"buffer,omitempty"` - // ExactBounds If false, the meta layer’s feature is the bounding box of the tile. - // If true, the meta layer’s feature is a bounding box resulting from a - // geo_bounds aggregation. The aggregation runs on values that intersect - // the // tile with wrap_longitude set to false. The resulting + // ExactBounds If `false`, the meta layer's feature is the bounding box of the tile. + // If `true`, the meta layer's feature is a bounding box resulting from a + // `geo_bounds` aggregation. The aggregation runs on values that + // intersect + // the `//` tile with `wrap_longitude` set to `false`. The resulting // bounding box may be larger than the vector tile. ExactBounds *bool `json:"exact_bounds,omitempty"` - // Extent Size, in pixels, of a side of the tile. Vector tiles are square with equal - // sides. + // Extent The size, in pixels, of a side of the tile. Vector tiles are square with + // equal sides. Extent *int `json:"extent,omitempty"` - // Fields Fields to return in the `hits` layer. Supports wildcards (`*`). + // Fields The fields to return in the `hits` layer. 
+ // It supports wildcards (`*`). // This parameter does not support fields with array values. Fields with array // values may return inconsistent results. Fields []string `json:"fields,omitempty"` - // GridAgg Aggregation used to create a grid for the `field`. + // GridAgg The aggregation used to create a grid for the `field`. GridAgg *gridaggregationtype.GridAggregationType `json:"grid_agg,omitempty"` // GridPrecision Additional zoom levels available through the aggs layer. For example, if - // is 7 - // and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, - // results - // don’t include the aggs layer. + // `` is `7` + // and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If + // 0, results + // don't include the aggs layer. GridPrecision *int `json:"grid_precision,omitempty"` // GridType Determines the geometry type for features in the aggs layer. In the aggs // layer, - // each feature represents a geotile_grid cell. If 'grid' each feature is a - // Polygon - // of the cells bounding box. If 'point' each feature is a Point that is the + // each feature represents a `geotile_grid` cell. If `grid, each feature is a + // polygon + // of the cells bounding box. If `point`, each feature is a Point that is the // centroid // of the cell. GridType *gridtype.GridType `json:"grid_type,omitempty"` - // Query Query DSL used to filter documents for the search. + // Query The query DSL used to filter documents for the search. Query *types.Query `json:"query,omitempty"` // RuntimeMappings Defines one or more runtime fields in the search request. These fields take // precedence over mapped fields with the same name. RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` - // Size Maximum number of features to return in the hits layer. Accepts 0-10000. - // If 0, results don’t include the hits layer. + // Size The maximum number of features to return in the hits layer. Accepts 0-10000. 
+ // If 0, results don't include the hits layer. Size *int `json:"size,omitempty"` - // Sort Sorts features in the hits layer. By default, the API calculates a bounding - // box for each feature. It sorts features based on this box’s diagonal length, + // Sort Sort the features in the hits layer. By default, the API calculates a + // bounding + // box for each feature. It sorts features based on this box's diagonal length, // from longest to shortest. Sort []types.SortCombinations `json:"sort,omitempty"` - // TrackTotalHits Number of hits matching the query to count accurately. If `true`, the exact - // number + // TrackTotalHits The number of hits matching the query to count accurately. If `true`, the + // exact number // of hits is returned at the cost of some performance. If `false`, the response // does // not include the total number of hits matching the query. @@ -101,6 +116,20 @@ type Request struct { // WithLabels If `true`, the hits and aggs layers will contain additional point features // representing // suggested label positions for the original features. + // + // * `Point` and `MultiPoint` features will have one of the points selected. + // * `Polygon` and `MultiPolygon` features will have a single point generated, + // either the centroid, if it is within the polygon, or another point within the + // polygon selected from the sorted triangle-tree. + // * `LineString` features will likewise provide a roughly central point + // selected from the triangle-tree. + // * The aggregation results will provide one central point for each aggregation + // bucket. + // + // All attributes from the original features will also be copied to the new + // label features. + // In addition, the new features will be distinguishable using the tag + // `_mvt_label_position`. 
WithLabels *bool `json:"with_labels,omitempty"` } diff --git a/typedapi/core/searchmvt/response.go b/typedapi/core/searchmvt/response.go index 6d1eb9b20f..0dbf745efc 100644 --- a/typedapi/core/searchmvt/response.go +++ b/typedapi/core/searchmvt/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package searchmvt // Response holds the response body struct for the package searchmvt // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search_mvt/SearchMvtResponse.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search_mvt/SearchMvtResponse.ts#L22-L25 type Response = []byte diff --git a/typedapi/core/searchmvt/search_mvt.go b/typedapi/core/searchmvt/search_mvt.go index 0e0a0ff712..fd68889c9a 100644 --- a/typedapi/core/searchmvt/search_mvt.go +++ b/typedapi/core/searchmvt/search_mvt.go @@ -16,10 +16,177 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Search a vector tile. -// Searches a vector tile for geospatial values. +// +// Search a vector tile for geospatial values. +// Before using this API, you should be familiar with the Mapbox vector tile +// specification. +// The API returns results as a binary mapbox vector tile. +// +// Internally, Elasticsearch translates a vector tile search API request into a +// search containing: +// +// * A `geo_bounding_box` query on the ``. 
The query uses the +// `//` tile as a bounding box. +// * A `geotile_grid` or `geohex_grid` aggregation on the ``. The +// `grid_agg` parameter determines the aggregation type. The aggregation uses +// the `//` tile as a bounding box. +// * Optionally, a `geo_bounds` aggregation on the ``. The search only +// includes this aggregation if the `exact_bounds` parameter is `true`. +// * If the optional parameter `with_labels` is `true`, the internal search will +// include a dynamic runtime field that calls the `getLabelPosition` function of +// the geometry doc value. This enables the generation of new point features +// containing suggested geometry labels, so that, for example, multi-polygons +// will have only one label. +// +// For example, Elasticsearch may translate a vector tile search API request +// with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of +// `true` into the following search +// +// ``` +// GET my-index/_search +// +// { +// "size": 10000, +// "query": { +// "geo_bounding_box": { +// "my-geo-field": { +// "top_left": { +// "lat": -40.979898069620134, +// "lon": -45 +// }, +// "bottom_right": { +// "lat": -66.51326044311186, +// "lon": 0 +// } +// } +// } +// }, +// "aggregations": { +// "grid": { +// "geotile_grid": { +// "field": "my-geo-field", +// "precision": 11, +// "size": 65536, +// "bounds": { +// "top_left": { +// "lat": -40.979898069620134, +// "lon": -45 +// }, +// "bottom_right": { +// "lat": -66.51326044311186, +// "lon": 0 +// } +// } +// } +// }, +// "bounds": { +// "geo_bounds": { +// "field": "my-geo-field", +// "wrap_longitude": false +// } +// } +// } +// } +// +// ``` +// +// The API returns results as a binary Mapbox vector tile. +// Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the +// tile contains three layers: +// +// * A `hits` layer containing a feature for each `` value matching the +// `geo_bounding_box` query. 
+// * An `aggs` layer containing a feature for each cell of the `geotile_grid` or +// `geohex_grid`. The layer only contains features for cells with matching data. +// * A meta layer containing: +// - A feature containing a bounding box. By default, this is the bounding box +// +// of the tile. +// - Value ranges for any sub-aggregations on the `geotile_grid` or +// +// `geohex_grid`. +// - Metadata for the search. +// +// The API only returns features that can display at its zoom level. +// For example, if a polygon feature has no area at its zoom level, the API +// omits it. +// The API returns errors as UTF-8 encoded JSON. +// +// IMPORTANT: You can specify several options for this API as either a query +// parameter or request body parameter. +// If you specify both parameters, the query parameter takes precedence. +// +// **Grid precision for geotile** +// +// For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles +// for lower zoom levels. +// `grid_precision` represents the additional zoom levels available through +// these cells. The final precision is computed by as follows: ` + +// grid_precision`. +// For example, if `` is 7 and `grid_precision` is 8, then the +// `geotile_grid` aggregation will use a precision of 15. +// The maximum final precision is 29. +// The `grid_precision` also determines the number of cells for the grid as +// follows: `(2^grid_precision) x (2^grid_precision)`. +// For example, a value of 8 divides the tile into a grid of 256 x 256 cells. +// The `aggs` layer only contains features for cells with matching data. +// +// **Grid precision for geohex** +// +// For a `grid_agg` of `geohex`, Elasticsearch uses `` and +// `grid_precision` to calculate a final precision as follows: ` + +// grid_precision`. +// +// This precision determines the H3 resolution of the hexagonal cells produced +// by the `geohex` aggregation. +// The following table maps the H3 resolution for each precision. 
+// For example, if `` is 3 and `grid_precision` is 3, the precision is 6. +// At a precision of 6, hexagonal cells have an H3 resolution of 2. +// If `` is 3 and `grid_precision` is 4, the precision is 7. +// At a precision of 7, hexagonal cells have an H3 resolution of 3. +// +// | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | +// | --------- | ---------------- | ------------- | ----------------| ----- | +// | 1 | 4 | 0 | 122 | 30.5 | +// | 2 | 16 | 0 | 122 | 7.625 | +// | 3 | 64 | 1 | 842 | 13.15625 | +// | 4 | 256 | 1 | 842 | 3.2890625 | +// | 5 | 1024 | 2 | 5882 | 5.744140625 | +// | 6 | 4096 | 2 | 5882 | 1.436035156 | +// | 7 | 16384 | 3 | 41162 | 2.512329102 | +// | 8 | 65536 | 3 | 41162 | 0.6280822754 | +// | 9 | 262144 | 4 | 288122 | 1.099098206 | +// | 10 | 1048576 | 4 | 288122 | 0.2747745514 | +// | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | +// | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | +// | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | +// | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | +// | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | +// | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | +// | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | +// | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | +// | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | +// | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | +// | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | +// | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | +// | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | +// | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | +// | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | +// | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | +// | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | +// | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | +// | 29 | 288230376151712000 | 15 | 569707381193162 | 
0.001976569537 | +// +// Hexagonal cells don't align perfectly on a vector tile. +// Some cells may intersect more than one vector tile. +// To compute the H3 resolution for each precision, Elasticsearch compares the +// average density of hexagonal bins at each resolution with the average density +// of tile bins at each zoom level. +// Elasticsearch uses the H3 resolution that is closest to the corresponding +// geotile density. package searchmvt import ( @@ -105,7 +272,174 @@ func NewSearchMvtFunc(tp elastictransport.Interface) NewSearchMvt { } // Search a vector tile. -// Searches a vector tile for geospatial values. +// +// Search a vector tile for geospatial values. +// Before using this API, you should be familiar with the Mapbox vector tile +// specification. +// The API returns results as a binary mapbox vector tile. +// +// Internally, Elasticsearch translates a vector tile search API request into a +// search containing: +// +// * A `geo_bounding_box` query on the ``. The query uses the +// `//` tile as a bounding box. +// * A `geotile_grid` or `geohex_grid` aggregation on the ``. The +// `grid_agg` parameter determines the aggregation type. The aggregation uses +// the `//` tile as a bounding box. +// * Optionally, a `geo_bounds` aggregation on the ``. The search only +// includes this aggregation if the `exact_bounds` parameter is `true`. +// * If the optional parameter `with_labels` is `true`, the internal search will +// include a dynamic runtime field that calls the `getLabelPosition` function of +// the geometry doc value. This enables the generation of new point features +// containing suggested geometry labels, so that, for example, multi-polygons +// will have only one label. 
+// +// For example, Elasticsearch may translate a vector tile search API request +// with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of +// `true` into the following search +// +// ``` +// GET my-index/_search +// +// { +// "size": 10000, +// "query": { +// "geo_bounding_box": { +// "my-geo-field": { +// "top_left": { +// "lat": -40.979898069620134, +// "lon": -45 +// }, +// "bottom_right": { +// "lat": -66.51326044311186, +// "lon": 0 +// } +// } +// } +// }, +// "aggregations": { +// "grid": { +// "geotile_grid": { +// "field": "my-geo-field", +// "precision": 11, +// "size": 65536, +// "bounds": { +// "top_left": { +// "lat": -40.979898069620134, +// "lon": -45 +// }, +// "bottom_right": { +// "lat": -66.51326044311186, +// "lon": 0 +// } +// } +// } +// }, +// "bounds": { +// "geo_bounds": { +// "field": "my-geo-field", +// "wrap_longitude": false +// } +// } +// } +// } +// +// ``` +// +// The API returns results as a binary Mapbox vector tile. +// Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the +// tile contains three layers: +// +// * A `hits` layer containing a feature for each `` value matching the +// `geo_bounding_box` query. +// * An `aggs` layer containing a feature for each cell of the `geotile_grid` or +// `geohex_grid`. The layer only contains features for cells with matching data. +// * A meta layer containing: +// - A feature containing a bounding box. By default, this is the bounding box +// +// of the tile. +// - Value ranges for any sub-aggregations on the `geotile_grid` or +// +// `geohex_grid`. +// - Metadata for the search. +// +// The API only returns features that can display at its zoom level. +// For example, if a polygon feature has no area at its zoom level, the API +// omits it. +// The API returns errors as UTF-8 encoded JSON. +// +// IMPORTANT: You can specify several options for this API as either a query +// parameter or request body parameter. 
+// If you specify both parameters, the query parameter takes precedence. +// +// **Grid precision for geotile** +// +// For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles +// for lower zoom levels. +// `grid_precision` represents the additional zoom levels available through +// these cells. The final precision is computed by as follows: ` + +// grid_precision`. +// For example, if `` is 7 and `grid_precision` is 8, then the +// `geotile_grid` aggregation will use a precision of 15. +// The maximum final precision is 29. +// The `grid_precision` also determines the number of cells for the grid as +// follows: `(2^grid_precision) x (2^grid_precision)`. +// For example, a value of 8 divides the tile into a grid of 256 x 256 cells. +// The `aggs` layer only contains features for cells with matching data. +// +// **Grid precision for geohex** +// +// For a `grid_agg` of `geohex`, Elasticsearch uses `` and +// `grid_precision` to calculate a final precision as follows: ` + +// grid_precision`. +// +// This precision determines the H3 resolution of the hexagonal cells produced +// by the `geohex` aggregation. +// The following table maps the H3 resolution for each precision. +// For example, if `` is 3 and `grid_precision` is 3, the precision is 6. +// At a precision of 6, hexagonal cells have an H3 resolution of 2. +// If `` is 3 and `grid_precision` is 4, the precision is 7. +// At a precision of 7, hexagonal cells have an H3 resolution of 3. 
+// +// | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | +// | --------- | ---------------- | ------------- | ----------------| ----- | +// | 1 | 4 | 0 | 122 | 30.5 | +// | 2 | 16 | 0 | 122 | 7.625 | +// | 3 | 64 | 1 | 842 | 13.15625 | +// | 4 | 256 | 1 | 842 | 3.2890625 | +// | 5 | 1024 | 2 | 5882 | 5.744140625 | +// | 6 | 4096 | 2 | 5882 | 1.436035156 | +// | 7 | 16384 | 3 | 41162 | 2.512329102 | +// | 8 | 65536 | 3 | 41162 | 0.6280822754 | +// | 9 | 262144 | 4 | 288122 | 1.099098206 | +// | 10 | 1048576 | 4 | 288122 | 0.2747745514 | +// | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | +// | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | +// | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | +// | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | +// | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | +// | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | +// | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | +// | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | +// | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | +// | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | +// | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | +// | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | +// | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | +// | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | +// | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | +// | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | +// | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | +// | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | +// | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | +// +// Hexagonal cells don't align perfectly on a vector tile. +// Some cells may intersect more than one vector tile. 
+// To compute the H3 resolution for each precision, Elasticsearch compares the +// average density of hexagonal bins at each resolution with the average density +// of tile bins at each zoom level. +// Elasticsearch uses the H3 resolution that is closest to the corresponding +// geotile density. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-vector-tile-api.html func New(tp elastictransport.Interface) *SearchMvt { @@ -115,8 +449,6 @@ func New(tp elastictransport.Interface) *SearchMvt { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -441,151 +773,263 @@ func (r *SearchMvt) Pretty(pretty bool) *SearchMvt { return r } -// Aggs Sub-aggregations for the geotile_grid. +// Sub-aggregations for the geotile_grid. // -// Supports the following aggregation types: -// - avg -// - cardinality -// - max -// - min -// - sum +// It supports the following aggregation types: +// +// - `avg` +// - `boxplot` +// - `cardinality` +// - `extended stats` +// - `max` +// - `median absolute deviation` +// - `min` +// - `percentile` +// - `percentile-rank` +// - `stats` +// - `sum` +// - `value count` +// +// The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is +// reserved for internal aggregations. 
// API name: aggs func (r *SearchMvt) Aggs(aggs map[string]types.Aggregations) *SearchMvt { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggs = aggs + return r +} + +func (r *SearchMvt) AddAgg(key string, value types.AggregationsVariant) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggs == nil { + r.req.Aggs = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggs + } + + tmp[key] = *value.AggregationsCaster() + r.req.Aggs = tmp return r } -// Buffer Size, in pixels, of a clipping buffer outside the tile. This allows renderers +// The size, in pixels, of a clipping buffer outside the tile. This allows +// renderers // to avoid outline artifacts from geometries that extend past the extent of the // tile. // API name: buffer func (r *SearchMvt) Buffer(buffer int) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Buffer = &buffer return r } -// ExactBounds If false, the meta layer’s feature is the bounding box of the tile. -// If true, the meta layer’s feature is a bounding box resulting from a -// geo_bounds aggregation. The aggregation runs on values that intersect -// the // tile with wrap_longitude set to false. The resulting +// If `false`, the meta layer's feature is the bounding box of the tile. +// If `true`, the meta layer's feature is a bounding box resulting from a +// `geo_bounds` aggregation. The aggregation runs on values that +// intersect +// the `//` tile with `wrap_longitude` set to `false`. The resulting // bounding box may be larger than the vector tile. 
// API name: exact_bounds func (r *SearchMvt) ExactBounds(exactbounds bool) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ExactBounds = &exactbounds return r } -// Extent Size, in pixels, of a side of the tile. Vector tiles are square with equal -// sides. +// The size, in pixels, of a side of the tile. Vector tiles are square with +// equal sides. // API name: extent func (r *SearchMvt) Extent(extent int) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Extent = &extent return r } -// Fields Fields to return in the `hits` layer. Supports wildcards (`*`). +// The fields to return in the `hits` layer. +// It supports wildcards (`*`). // This parameter does not support fields with array values. Fields with array // values may return inconsistent results. // API name: fields func (r *SearchMvt) Fields(fields ...string) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Fields = fields return r } -// GridAgg Aggregation used to create a grid for the `field`. +// The aggregation used to create a grid for the `field`. // API name: grid_agg func (r *SearchMvt) GridAgg(gridagg gridaggregationtype.GridAggregationType) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.GridAgg = &gridagg - return r } -// GridPrecision Additional zoom levels available through the aggs layer. For example, if -// is 7 -// and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, -// results -// don’t include the aggs layer. +// Additional zoom levels available through the aggs layer. For example, if +// `` is `7` +// and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If +// 0, results +// don't include the aggs layer. 
 // API name: grid_precision
 func (r *SearchMvt) GridPrecision(gridprecision int) *SearchMvt {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
 	r.req.GridPrecision = &gridprecision
 
 	return r
 }
 
-// GridType Determines the geometry type for features in the aggs layer. In the aggs
+// Determines the geometry type for features in the aggs layer. In the aggs
 // layer,
-// each feature represents a geotile_grid cell. If 'grid' each feature is a
-// Polygon
-// of the cells bounding box. If 'point' each feature is a Point that is the
+// each feature represents a `geotile_grid` cell. If `grid`, each feature is a
+// polygon
+// of the cells bounding box. If `point`, each feature is a Point that is the
 // centroid
 // of the cell.
 // API name: grid_type
 func (r *SearchMvt) GridType(gridtype gridtype.GridType) *SearchMvt {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
 	r.req.GridType = &gridtype
-
 	return r
 }
 
-// Query Query DSL used to filter documents for the search.
+// The query DSL used to filter documents for the search.
 // API name: query
-func (r *SearchMvt) Query(query *types.Query) *SearchMvt {
+func (r *SearchMvt) Query(query types.QueryVariant) *SearchMvt {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
 
-	r.req.Query = query
+	r.req.Query = query.QueryCaster()
 
 	return r
 }
 
-// RuntimeMappings Defines one or more runtime fields in the search request. These fields take
+// Defines one or more runtime fields in the search request. These fields take
 // precedence over mapped fields with the same name.
// API name: runtime_mappings -func (r *SearchMvt) RuntimeMappings(runtimefields types.RuntimeFields) *SearchMvt { - r.req.RuntimeMappings = runtimefields +func (r *SearchMvt) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// Size Maximum number of features to return in the hits layer. Accepts 0-10000. -// If 0, results don’t include the hits layer. +// The maximum number of features to return in the hits layer. Accepts 0-10000. +// If 0, results don't include the hits layer. // API name: size func (r *SearchMvt) Size(size int) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// Sort Sorts features in the hits layer. By default, the API calculates a bounding -// box for each feature. It sorts features based on this box’s diagonal length, +// Sort the features in the hits layer. By default, the API calculates a +// bounding +// box for each feature. It sorts features based on this box's diagonal length, // from longest to shortest. // API name: sort -func (r *SearchMvt) Sort(sorts ...types.SortCombinations) *SearchMvt { - r.req.Sort = sorts +func (r *SearchMvt) Sort(sorts ...types.SortCombinationsVariant) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } -// TrackTotalHits Number of hits matching the query to count accurately. If `true`, the exact -// number +// The number of hits matching the query to count accurately. If `true`, the +// exact number // of hits is returned at the cost of some performance. 
If `false`, the response // does // not include the total number of hits matching the query. // API name: track_total_hits -func (r *SearchMvt) TrackTotalHits(trackhits types.TrackHits) *SearchMvt { - r.req.TrackTotalHits = trackhits +func (r *SearchMvt) TrackTotalHits(trackhits types.TrackHitsVariant) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackTotalHits = *trackhits.TrackHitsCaster() return r } -// WithLabels If `true`, the hits and aggs layers will contain additional point features +// If `true`, the hits and aggs layers will contain additional point features // representing // suggested label positions for the original features. +// +// * `Point` and `MultiPoint` features will have one of the points selected. +// * `Polygon` and `MultiPolygon` features will have a single point generated, +// either the centroid, if it is within the polygon, or another point within the +// polygon selected from the sorted triangle-tree. +// * `LineString` features will likewise provide a roughly central point +// selected from the triangle-tree. +// * The aggregation results will provide one central point for each aggregation +// bucket. +// +// All attributes from the original features will also be copied to the new +// label features. +// In addition, the new features will be distinguishable using the tag +// `_mvt_label_position`. // API name: with_labels func (r *SearchMvt) WithLabels(withlabels bool) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.WithLabels = &withlabels return r diff --git a/typedapi/core/searchshards/response.go b/typedapi/core/searchshards/response.go index 071101ce2d..4d8478ca32 100644 --- a/typedapi/core/searchshards/response.go +++ b/typedapi/core/searchshards/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package searchshards @@ -26,18 +26,18 @@ import ( // Response holds the response body struct for the package searchshards // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search_shards/SearchShardsResponse.ts#L25-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search_shards/SearchShardsResponse.ts#L34-L40 type Response struct { - Indices map[string]types.ShardStoreIndex `json:"indices"` - Nodes map[string]types.NodeAttributes `json:"nodes"` - Shards [][]types.NodeShard `json:"shards"` + Indices map[string]types.ShardStoreIndex `json:"indices"` + Nodes map[string]types.SearchShardsNodeAttributes `json:"nodes"` + Shards [][]types.NodeShard `json:"shards"` } // NewResponse returns a Response func NewResponse() *Response { r := &Response{ Indices: make(map[string]types.ShardStoreIndex, 0), - Nodes: make(map[string]types.NodeAttributes, 0), + Nodes: make(map[string]types.SearchShardsNodeAttributes, 0), } return r } diff --git a/typedapi/core/searchshards/search_shards.go b/typedapi/core/searchshards/search_shards.go index e1c28bf09b..6224f2197d 100644 --- a/typedapi/core/searchshards/search_shards.go +++ b/typedapi/core/searchshards/search_shards.go @@ -16,10 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about the indices and shards that a search request would -// be executed against. +// Get the search shards. 
+// +// Get the indices and shards that a search request would be run against. +// This information can be useful for working out issues or planning +// optimizations with routing and shard preferences. +// When filtered aliases are used, the filter is returned as part of the +// `indices` section. +// +// If the Elasticsearch security features are enabled, you must have the +// `view_index_metadata` or `manage` index privilege for the target data stream, +// index, or alias. package searchshards import ( @@ -76,8 +85,17 @@ func NewSearchShardsFunc(tp elastictransport.Interface) NewSearchShards { } } -// Returns information about the indices and shards that a search request would -// be executed against. +// Get the search shards. +// +// Get the indices and shards that a search request would be run against. +// This information can be useful for working out issues or planning +// optimizations with routing and shard preferences. +// When filtered aliases are used, the filter is returned as part of the +// `indices` section. +// +// If the Elasticsearch security features are enabled, you must have the +// `view_index_metadata` or `manage` index privilege for the target data stream, +// index, or alias. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html func New(tp elastictransport.Interface) *SearchShards { @@ -294,8 +312,10 @@ func (r *SearchShards) Header(key, value string) *SearchShards { return r } -// Index Returns the indices and shards that a search request would be executed -// against. +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). +// To search all data streams and indices, omit this parameter or use `*` or +// `_all`. 
// API Name: index func (r *SearchShards) Index(index string) *SearchShards { r.paramSet |= indexMask @@ -349,8 +369,19 @@ func (r *SearchShards) Local(local bool) *SearchShards { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// IT can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *SearchShards) MasterTimeout(duration string) *SearchShards { + r.values.Set("master_timeout", duration) + + return r +} + +// Preference The node or shard the operation should be performed on. +// It is random by default. // API name: preference func (r *SearchShards) Preference(preference string) *SearchShards { r.values.Set("preference", preference) @@ -358,7 +389,7 @@ func (r *SearchShards) Preference(preference string) *SearchShards { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *SearchShards) Routing(routing string) *SearchShards { r.values.Set("routing", routing) diff --git a/typedapi/core/searchtemplate/request.go b/typedapi/core/searchtemplate/request.go index 9892bd2a21..efcd2235da 100644 --- a/typedapi/core/searchtemplate/request.go +++ b/typedapi/core/searchtemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package searchtemplate @@ -31,13 +31,15 @@ import ( // Request holds the request body struct for the package searchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search_template/SearchTemplateRequest.ts#L32-L134 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search_template/SearchTemplateRequest.ts#L32-L153 type Request struct { // Explain If `true`, returns detailed information about score calculation as part of // each hit. + // If you specify both this and the `explain` query parameter, the API uses only + // the query parameter. Explain *bool `json:"explain,omitempty"` - // Id ID of the search template to use. If no source is specified, + // Id The ID of the search template to use. If no `source` is specified, // this parameter is required. Id *string `json:"id,omitempty"` // Params Key-value pairs used to replace Mustache variables in the template. @@ -47,7 +49,8 @@ type Request struct { // Profile If `true`, the query execution is profiled. Profile *bool `json:"profile,omitempty"` // Source An inline search template. Supports the same parameters as the search API's - // request body. Also supports Mustache variables. If no id is specified, this + // request body. It also supports Mustache variables. If no `id` is specified, + // this // parameter is required. Source *string `json:"source,omitempty"` } diff --git a/typedapi/core/searchtemplate/response.go b/typedapi/core/searchtemplate/response.go index f640edeaa2..9e7bac4378 100644 --- a/typedapi/core/searchtemplate/response.go +++ b/typedapi/core/searchtemplate/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package searchtemplate @@ -34,7 +34,7 @@ import ( // Response holds the response body struct for the package searchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search_template/SearchTemplateResponse.ts#L30-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search_template/SearchTemplateResponse.ts#L30-L48 type Response struct { Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` @@ -504,6 +504,13 @@ func (s *Response) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := types.NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := types.NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { diff --git a/typedapi/core/searchtemplate/search_template.go b/typedapi/core/searchtemplate/search_template.go index 85732c8416..f5a7f52193 100644 --- a/typedapi/core/searchtemplate/search_template.go +++ b/typedapi/core/searchtemplate/search_template.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Runs a search with a search template. +// Run a search with a search template. 
package searchtemplate import ( @@ -81,9 +81,9 @@ func NewSearchTemplateFunc(tp elastictransport.Interface) NewSearchTemplate { } } -// Runs a search with a search template. +// Run a search with a search template. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template-api.html func New(tp elastictransport.Interface) *SearchTemplate { r := &SearchTemplate{ transport: tp, @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *SearchTemplate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -315,8 +313,8 @@ func (r *SearchTemplate) Header(key, value string) *SearchTemplate { return r } -// Index Comma-separated list of data streams, indices, -// and aliases to search. Supports wildcards (*). +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). // API Name: index func (r *SearchTemplate) Index(index string) *SearchTemplate { r.paramSet |= indexMask @@ -346,7 +344,7 @@ func (r *SearchTemplate) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Sear return r } -// ExpandWildcards Type of index that wildcard patterns can match. +// ExpandWildcards The type of index that wildcard patterns can match. // If the request can target data streams, this argument determines whether // wildcard expressions match hidden data streams. // Supports comma-separated values, such as `open,hidden`. @@ -380,8 +378,8 @@ func (r *SearchTemplate) IgnoreUnavailable(ignoreunavailable bool) *SearchTempla return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// It is random by default. 
// API name: preference func (r *SearchTemplate) Preference(preference string) *SearchTemplate { r.values.Set("preference", preference) @@ -389,7 +387,7 @@ func (r *SearchTemplate) Preference(preference string) *SearchTemplate { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *SearchTemplate) Routing(routing string) *SearchTemplate { r.values.Set("routing", routing) @@ -414,7 +412,8 @@ func (r *SearchTemplate) SearchType(searchtype searchtype.SearchType) *SearchTem return r } -// RestTotalHitsAsInt If true, hits.total are rendered as an integer in the response. +// RestTotalHitsAsInt If `true`, `hits.total` is rendered as an integer in the response. +// If `false`, it is rendered as an object. // API name: rest_total_hits_as_int func (r *SearchTemplate) RestTotalHitsAsInt(resttotalhitsasint bool) *SearchTemplate { r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) @@ -475,48 +474,91 @@ func (r *SearchTemplate) Pretty(pretty bool) *SearchTemplate { return r } -// Explain If `true`, returns detailed information about score calculation as part of +// If `true`, returns detailed information about score calculation as part of // each hit. +// If you specify both this and the `explain` query parameter, the API uses only +// the query parameter. // API name: explain func (r *SearchTemplate) Explain(explain bool) *SearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Explain = &explain return r } -// Id ID of the search template to use. If no source is specified, +// The ID of the search template to use. If no `source` is specified, // this parameter is required. 
// API name: id func (r *SearchTemplate) Id(id string) *SearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Id = &id return r } -// Params Key-value pairs used to replace Mustache variables in the template. +// Key-value pairs used to replace Mustache variables in the template. // The key is the variable name. // The value is the variable value. // API name: params func (r *SearchTemplate) Params(params map[string]json.RawMessage) *SearchTemplate { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Params = params + return r +} +func (r *SearchTemplate) AddParam(key string, value json.RawMessage) *SearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Params == nil { + r.req.Params = make(map[string]json.RawMessage) + } else { + tmp = r.req.Params + } + + tmp[key] = value + + r.req.Params = tmp return r } -// Profile If `true`, the query execution is profiled. +// If `true`, the query execution is profiled. // API name: profile func (r *SearchTemplate) Profile(profile bool) *SearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Profile = &profile return r } -// Source An inline search template. Supports the same parameters as the search API's -// request body. Also supports Mustache variables. If no id is specified, this +// An inline search template. Supports the same parameters as the search API's +// request body. It also supports Mustache variables. If no `id` is specified, +// this // parameter is required. 
// API name: source func (r *SearchTemplate) Source(source string) *SearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Source = &source diff --git a/typedapi/core/termsenum/request.go b/typedapi/core/termsenum/request.go index 1c03ffe77b..9a513014a2 100644 --- a/typedapi/core/termsenum/request.go +++ b/typedapi/core/termsenum/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package termsenum @@ -33,27 +33,33 @@ import ( // Request holds the request body struct for the package termsenum // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/terms_enum/TermsEnumRequest.ts#L26-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/terms_enum/TermsEnumRequest.ts#L26-L93 type Request struct { - // CaseInsensitive When true the provided search string is matched against index terms without - // case sensitivity. + // CaseInsensitive When `true`, the provided search string is matched against index terms + // without case sensitivity. CaseInsensitive *bool `json:"case_insensitive,omitempty"` // Field The string to match at the start of indexed terms. If not provided, all terms // in the field are considered. Field string `json:"field"` - // IndexFilter Allows to filter an index shard if the provided query rewrites to match_none. + // IndexFilter Filter an index shard if the provided query rewrites to `match_none`. IndexFilter *types.Query `json:"index_filter,omitempty"` - SearchAfter *string `json:"search_after,omitempty"` - // Size How many matching terms to return. 
+ // SearchAfter The string after which terms in the index should be returned. + // It allows for a form of pagination if the last result from one request is + // passed as the `search_after` parameter for a subsequent request. + SearchAfter *string `json:"search_after,omitempty"` + // Size The number of matching terms to return. Size *int `json:"size,omitempty"` - // String The string after which terms in the index should be returned. Allows for a - // form of pagination if the last result from one request is passed as the - // search_after parameter for a subsequent request. + // String The string to match at the start of indexed terms. + // If it is not provided, all terms in the field are considered. + // + // > info + // > The prefix string cannot be larger than the largest possible keyword value, + // which is Lucene's term byte-length limit of 32766. String *string `json:"string,omitempty"` - // Timeout The maximum length of time to spend collecting results. Defaults to "1s" (one - // second). If the timeout is exceeded the complete flag set to false in the - // response and the results may be partial or empty. + // Timeout The maximum length of time to spend collecting results. + // If the timeout is exceeded the `complete` flag set to `false` in the response + // and the results may be partial or empty. Timeout types.Duration `json:"timeout,omitempty"` } diff --git a/typedapi/core/termsenum/response.go b/typedapi/core/termsenum/response.go index 4b3edb4466..227d6518f7 100644 --- a/typedapi/core/termsenum/response.go +++ b/typedapi/core/termsenum/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package termsenum @@ -26,8 +26,13 @@ import ( // Response holds the response body struct for the package termsenum // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/terms_enum/TermsEnumResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/terms_enum/TermsEnumResponse.ts#L22-L32 type Response struct { + + // Complete If `false`, the returned terms set may be incomplete and should be treated as + // approximate. + // This can occur due to a few reasons, such as a request timeout or a node + // error. Complete bool `json:"complete"` Shards_ types.ShardStatistics `json:"_shards"` Terms []string `json:"terms"` diff --git a/typedapi/core/termsenum/terms_enum.go b/typedapi/core/termsenum/terms_enum.go index b72e04377e..c840c40abf 100644 --- a/typedapi/core/termsenum/terms_enum.go +++ b/typedapi/core/termsenum/terms_enum.go @@ -16,11 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// The terms enum API can be used to discover terms in the index that begin -// with the provided string. It is designed for low-latency look-ups used in -// auto-complete scenarios. +// Get terms in an index. +// +// Discover terms that match a partial string in an index. +// This API is designed for low-latency look-ups used in auto-complete +// scenarios. +// +// > info +// > The terms enum API may return terms from deleted documents. 
Deleted +// documents are initially only marked as deleted. It is not until their +// segments are merged that documents are actually deleted. Until that happens, +// the terms enum API will return terms from these documents. package termsenum import ( @@ -83,9 +91,17 @@ func NewTermsEnumFunc(tp elastictransport.Interface) NewTermsEnum { } } -// The terms enum API can be used to discover terms in the index that begin -// with the provided string. It is designed for low-latency look-ups used in -// auto-complete scenarios. +// Get terms in an index. +// +// Discover terms that match a partial string in an index. +// This API is designed for low-latency look-ups used in auto-complete +// scenarios. +// +// > info +// > The terms enum API may return terms from deleted documents. Deleted +// documents are initially only marked as deleted. It is not until their +// segments are merged that documents are actually deleted. Until that happens, +// the terms enum API will return terms from these documents. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-terms-enum.html func New(tp elastictransport.Interface) *TermsEnum { @@ -95,8 +111,6 @@ func New(tp elastictransport.Interface) *TermsEnum { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -308,8 +322,10 @@ func (r *TermsEnum) Header(key, value string) *TermsEnum { return r } -// Index Comma-separated list of data streams, indices, and index aliases to search. -// Wildcard (*) expressions are supported. +// Index A comma-separated list of data streams, indices, and index aliases to search. +// Wildcard (`*`) expressions are supported. +// To search all data streams or indices, omit this parameter or use `*` or +// `_all`. 
// API Name: index func (r *TermsEnum) _index(index string) *TermsEnum { r.paramSet |= indexMask @@ -362,66 +378,104 @@ func (r *TermsEnum) Pretty(pretty bool) *TermsEnum { return r } -// CaseInsensitive When true the provided search string is matched against index terms without -// case sensitivity. +// When `true`, the provided search string is matched against index terms +// without case sensitivity. // API name: case_insensitive func (r *TermsEnum) CaseInsensitive(caseinsensitive bool) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.CaseInsensitive = &caseinsensitive return r } -// Field The string to match at the start of indexed terms. If not provided, all terms +// The string to match at the start of indexed terms. If not provided, all terms // in the field are considered. // API name: field func (r *TermsEnum) Field(field string) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Field = field return r } -// IndexFilter Allows to filter an index shard if the provided query rewrites to match_none. +// Filter an index shard if the provided query rewrites to `match_none`. // API name: index_filter -func (r *TermsEnum) IndexFilter(indexfilter *types.Query) *TermsEnum { +func (r *TermsEnum) IndexFilter(indexfilter types.QueryVariant) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndexFilter = indexfilter + r.req.IndexFilter = indexfilter.QueryCaster() return r } +// The string after which terms in the index should be returned. +// It allows for a form of pagination if the last result from one request is +// passed as the `search_after` parameter for a subsequent request. 
// API name: search_after func (r *TermsEnum) SearchAfter(searchafter string) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.SearchAfter = &searchafter return r } -// Size How many matching terms to return. +// The number of matching terms to return. // API name: size func (r *TermsEnum) Size(size int) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// String The string after which terms in the index should be returned. Allows for a -// form of pagination if the last result from one request is passed as the -// search_after parameter for a subsequent request. +// The string to match at the start of indexed terms. +// If it is not provided, all terms in the field are considered. +// +// > info +// > The prefix string cannot be larger than the largest possible keyword value, +// which is Lucene's term byte-length limit of 32766. // API name: string func (r *TermsEnum) String(string string) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.String = &string return r } -// Timeout The maximum length of time to spend collecting results. Defaults to "1s" (one -// second). If the timeout is exceeded the complete flag set to false in the -// response and the results may be partial or empty. +// The maximum length of time to spend collecting results. +// If the timeout is exceeded the `complete` flag set to `false` in the response +// and the results may be partial or empty. 
// API name: timeout -func (r *TermsEnum) Timeout(duration types.Duration) *TermsEnum { - r.req.Timeout = duration +func (r *TermsEnum) Timeout(duration types.DurationVariant) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/core/termvectors/request.go b/typedapi/core/termvectors/request.go index f99c3cfdb9..59cd19848b 100644 --- a/typedapi/core/termvectors/request.go +++ b/typedapi/core/termvectors/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package termvectors @@ -29,15 +29,23 @@ import ( // Request holds the request body struct for the package termvectors // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/termvectors/TermVectorsRequest.ts#L33-L120 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/termvectors/TermVectorsRequest.ts#L33-L187 type Request struct { // Doc An artificial document (a document not present in the index) for which you // want to retrieve term vectors. Doc json.RawMessage `json:"doc,omitempty"` // Filter Filter terms based on their tf-idf scores. + // This could be useful in order find out a good characteristic vector of a + // document. + // This feature works in a similar manner to the second phase of the More Like + // This Query. Filter *types.TermVectorsFilter `json:"filter,omitempty"` - // PerFieldAnalyzer Overrides the default per-field analyzer. + // PerFieldAnalyzer Override the default per-field analyzer. 
+ // This is useful in order to generate term vectors in any fashion, especially + // when using artificial documents. + // When providing an analyzer for a field that already stores term vectors, the + // term vectors will be regenerated. PerFieldAnalyzer map[string]string `json:"per_field_analyzer,omitempty"` } diff --git a/typedapi/core/termvectors/response.go b/typedapi/core/termvectors/response.go index 3d42fc37ce..ad652de2f7 100644 --- a/typedapi/core/termvectors/response.go +++ b/typedapi/core/termvectors/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package termvectors @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package termvectors // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/termvectors/TermVectorsResponse.ts#L25-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/termvectors/TermVectorsResponse.ts#L25-L34 type Response struct { Found bool `json:"found"` Id_ *string `json:"_id,omitempty"` diff --git a/typedapi/core/termvectors/termvectors.go b/typedapi/core/termvectors/termvectors.go index fc32c0057a..d97c7dfbfd 100644 --- a/typedapi/core/termvectors/termvectors.go +++ b/typedapi/core/termvectors/termvectors.go @@ -16,11 +16,62 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get term vector information. 
-// Returns information and statistics about terms in the fields of a particular +// +// Get information and statistics about terms in the fields of a particular // document. +// +// You can retrieve term vectors for documents stored in the index or for +// artificial documents passed in the body of the request. +// You can specify the fields you are interested in through the `fields` +// parameter or by adding the fields to the request body. +// For example: +// +// ``` +// GET /my-index-000001/_termvectors/1?fields=message +// ``` +// +// Fields can be specified using wildcards, similar to the multi match query. +// +// Term vectors are real-time by default, not near real-time. +// This can be changed by setting `realtime` parameter to `false`. +// +// You can request three types of values: _term information_, _term statistics_, +// and _field statistics_. +// By default, all term information and field statistics are returned for all +// fields but term statistics are excluded. +// +// **Term information** +// +// * term frequency in the field (always returned) +// * term positions (`positions: true`) +// * start and end offsets (`offsets: true`) +// * term payloads (`payloads: true`), as base64 encoded bytes +// +// If the requested information wasn't stored in the index, it will be computed +// on the fly if possible. +// Additionally, term vectors could be computed for documents not even existing +// in the index, but instead provided by the user. +// +// > warn +// > Start and end offsets assume UTF-16 encoding is being used. If you want to +// use these offsets in order to get the original text that produced this token, +// you should make sure that the string you are taking a sub-string of is also +// encoded using UTF-16. +// +// **Behaviour** +// +// The term and field statistics are not accurate. +// Deleted documents are not taken into account. +// The information is only retrieved for the shard the requested document +// resides in. 
+// The term and field statistics are therefore only useful as relative measures +// whereas the absolute numbers have no meaning in this context. +// By default, when requesting term vectors of artificial documents, a shard to +// get the statistics from is randomly selected. +// Use `routing` only to hit a particular shard. package termvectors import ( @@ -88,9 +139,60 @@ func NewTermvectorsFunc(tp elastictransport.Interface) NewTermvectors { } // Get term vector information. -// Returns information and statistics about terms in the fields of a particular +// +// Get information and statistics about terms in the fields of a particular // document. // +// You can retrieve term vectors for documents stored in the index or for +// artificial documents passed in the body of the request. +// You can specify the fields you are interested in through the `fields` +// parameter or by adding the fields to the request body. +// For example: +// +// ``` +// GET /my-index-000001/_termvectors/1?fields=message +// ``` +// +// Fields can be specified using wildcards, similar to the multi match query. +// +// Term vectors are real-time by default, not near real-time. +// This can be changed by setting `realtime` parameter to `false`. +// +// You can request three types of values: _term information_, _term statistics_, +// and _field statistics_. +// By default, all term information and field statistics are returned for all +// fields but term statistics are excluded. +// +// **Term information** +// +// * term frequency in the field (always returned) +// * term positions (`positions: true`) +// * start and end offsets (`offsets: true`) +// * term payloads (`payloads: true`), as base64 encoded bytes +// +// If the requested information wasn't stored in the index, it will be computed +// on the fly if possible. +// Additionally, term vectors could be computed for documents not even existing +// in the index, but instead provided by the user. 
+// +// > warn +// > Start and end offsets assume UTF-16 encoding is being used. If you want to +// use these offsets in order to get the original text that produced this token, +// you should make sure that the string you are taking a sub-string of is also +// encoded using UTF-16. +// +// **Behaviour** +// +// The term and field statistics are not accurate. +// Deleted documents are not taken into account. +// The information is only retrieved for the shard the requested document +// resides in. +// The term and field statistics are therefore only useful as relative measures +// whereas the absolute numbers have no meaning in this context. +// By default, when requesting term vectors of artificial documents, a shard to +// get the statistics from is randomly selected. +// Use `routing` only to hit a particular shard. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-termvectors.html func New(tp elastictransport.Interface) *Termvectors { r := &Termvectors{ @@ -99,8 +201,6 @@ func New(tp elastictransport.Interface) *Termvectors { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -329,7 +429,7 @@ func (r *Termvectors) Header(key, value string) *Termvectors { return r } -// Index Name of the index that contains the document. +// Index The name of the index that contains the document. // API Name: index func (r *Termvectors) _index(index string) *Termvectors { r.paramSet |= indexMask @@ -338,7 +438,7 @@ func (r *Termvectors) _index(index string) *Termvectors { return r } -// Id Unique identifier of the document. +// Id A unique identifier for the document. 
// API Name: id func (r *Termvectors) Id(id string) *Termvectors { r.paramSet |= idMask @@ -347,10 +447,10 @@ func (r *Termvectors) Id(id string) *Termvectors { return r } -// Fields Comma-separated list or wildcard expressions of fields to include in the +// Fields A comma-separated list or wildcard expressions of fields to include in the // statistics. -// Used as the default list unless a specific field list is provided in the -// `completion_fields` or `fielddata_fields` parameters. +// It is used as the default list unless a specific field list is provided in +// the `completion_fields` or `fielddata_fields` parameters. // API name: fields func (r *Termvectors) Fields(fields ...string) *Termvectors { r.values.Set("fields", strings.Join(fields, ",")) @@ -358,8 +458,13 @@ func (r *Termvectors) Fields(fields ...string) *Termvectors { return r } -// FieldStatistics If `true`, the response includes the document count, sum of document -// frequencies, and sum of total term frequencies. +// FieldStatistics If `true`, the response includes: +// +// * The document count (how many documents contain this field). +// * The sum of document frequencies (the sum of document frequencies for all +// terms in this field). +// * The sum of total term frequencies (the sum of total term frequencies of +// each term in this field). // API name: field_statistics func (r *Termvectors) FieldStatistics(fieldstatistics bool) *Termvectors { r.values.Set("field_statistics", strconv.FormatBool(fieldstatistics)) @@ -391,8 +496,8 @@ func (r *Termvectors) Positions(positions bool) *Termvectors { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// It is random by default. 
// API name: preference func (r *Termvectors) Preference(preference string) *Termvectors { r.values.Set("preference", preference) @@ -408,7 +513,7 @@ func (r *Termvectors) Realtime(realtime bool) *Termvectors { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value that is used to route operations to a specific shard. // API name: routing func (r *Termvectors) Routing(routing string) *Termvectors { r.values.Set("routing", routing) @@ -416,7 +521,14 @@ func (r *Termvectors) Routing(routing string) *Termvectors { return r } -// TermStatistics If `true`, the response includes term frequency and document frequency. +// TermStatistics If `true`, the response includes: +// +// * The total term frequency (how often a term occurs in all documents). +// * The document frequency (the number of documents containing the current +// term). +// +// By default these values are not returned since term statistics can have a +// serious performance impact. // API name: term_statistics func (r *Termvectors) TermStatistics(termstatistics bool) *Termvectors { r.values.Set("term_statistics", strconv.FormatBool(termstatistics)) @@ -432,7 +544,7 @@ func (r *Termvectors) Version(versionnumber string) *Termvectors { return r } -// VersionType Specific version type. +// VersionType The version type. // API name: version_type func (r *Termvectors) VersionType(versiontype versiontype.VersionType) *Termvectors { r.values.Set("version_type", versiontype.String()) @@ -484,14 +596,14 @@ func (r *Termvectors) Pretty(pretty bool) *Termvectors { return r } -// Doc An artificial document (a document not present in the index) for which you +// An artificial document (a document not present in the index) for which you // want to retrieve term vectors. // API name: doc -// -// doc should be a json.RawMessage or a structure -// if a structure is provided, the client will defer a json serialization -// prior to sending the payload to Elasticsearch. 
func (r *Termvectors) Doc(doc any) *Termvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } switch casted := doc.(type) { case json.RawMessage: r.req.Doc = casted @@ -505,24 +617,56 @@ func (r *Termvectors) Doc(doc any) *Termvectors { return nil }) } - return r } -// Filter Filter terms based on their tf-idf scores. +// Filter terms based on their tf-idf scores. +// This could be useful in order find out a good characteristic vector of a +// document. +// This feature works in a similar manner to the second phase of the More Like +// This Query. // API name: filter -func (r *Termvectors) Filter(filter *types.TermVectorsFilter) *Termvectors { +func (r *Termvectors) Filter(filter types.TermVectorsFilterVariant) *Termvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Filter = filter + r.req.Filter = filter.TermVectorsFilterCaster() return r } -// PerFieldAnalyzer Overrides the default per-field analyzer. +// Override the default per-field analyzer. +// This is useful in order to generate term vectors in any fashion, especially +// when using artificial documents. +// When providing an analyzer for a field that already stores term vectors, the +// term vectors will be regenerated. 
// API name: per_field_analyzer func (r *Termvectors) PerFieldAnalyzer(perfieldanalyzer map[string]string) *Termvectors { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.PerFieldAnalyzer = perfieldanalyzer + return r +} + +func (r *Termvectors) AddPerFieldAnalyzer(key string, value string) *Termvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]string + if r.req.PerFieldAnalyzer == nil { + r.req.PerFieldAnalyzer = make(map[string]string) + } else { + tmp = r.req.PerFieldAnalyzer + } + + tmp[key] = value + r.req.PerFieldAnalyzer = tmp return r } diff --git a/typedapi/core/update/request.go b/typedapi/core/update/request.go index a14e59c9c0..da5396a9a4 100644 --- a/typedapi/core/update/request.go +++ b/typedapi/core/update/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package update @@ -33,27 +33,29 @@ import ( // Request holds the request body struct for the package update // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/update/UpdateRequest.ts#L38-L153 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/update/UpdateRequest.ts#L38-L194 type Request struct { - // DetectNoop Set to false to disable setting 'result' in the response - // to 'noop' if no change to the document occurred. + // DetectNoop If `true`, the `result` in the response is set to `noop` (no operation) when + // there are no changes to the document. 
DetectNoop *bool `json:"detect_noop,omitempty"` // Doc A partial update to an existing document. + // If both `doc` and `script` are specified, `doc` is ignored. Doc json.RawMessage `json:"doc,omitempty"` - // DocAsUpsert Set to true to use the contents of 'doc' as the value of 'upsert' + // DocAsUpsert If `true`, use the contents of 'doc' as the value of 'upsert'. + // NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. DocAsUpsert *bool `json:"doc_as_upsert,omitempty"` - // Script Script to execute to update the document. + // Script The script to run to update the document. Script *types.Script `json:"script,omitempty"` - // ScriptedUpsert Set to true to execute the script whether or not the document exists. + // ScriptedUpsert If `true`, run the script whether or not the document exists. ScriptedUpsert *bool `json:"scripted_upsert,omitempty"` - // Source_ Set to false to disable source retrieval. You can also specify a - // comma-separated - // list of the fields you want to retrieve. + // Source_ If `false`, turn off source retrieval. + // You can also specify a comma-separated list of the fields you want to + // retrieve. Source_ types.SourceConfig `json:"_source,omitempty"` // Upsert If the document does not already exist, the contents of 'upsert' are inserted - // as a - // new document. If the document exists, the 'script' is executed. + // as a new document. + // If the document exists, the 'script' is run. Upsert json.RawMessage `json:"upsert,omitempty"` } diff --git a/typedapi/core/update/response.go b/typedapi/core/update/response.go index 467b8fba0f..08ed680e62 100644 --- a/typedapi/core/update/response.go +++ b/typedapi/core/update/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package update @@ -27,17 +27,26 @@ import ( // Response holds the response body struct for the package update // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/update/UpdateResponse.ts#L27-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/update/UpdateResponse.ts#L27-L29 type Response struct { - ForcedRefresh *bool `json:"forced_refresh,omitempty"` - Get *types.InlineGet `json:"get,omitempty"` - Id_ string `json:"_id"` - Index_ string `json:"_index"` - PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - Result result.Result `json:"result"` - SeqNo_ *int64 `json:"_seq_no,omitempty"` - Shards_ types.ShardStatistics `json:"_shards"` - Version_ int64 `json:"_version"` + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + Get *types.InlineGet `json:"get,omitempty"` + // Id_ The unique identifier for the added document. + Id_ string `json:"_id"` + // Index_ The name of the index the document was added to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result The result of the indexing operation: `created` or `updated`. + Result result.Result `json:"result"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Information about the replication process of the operation. 
+ Shards_ types.ShardStatistics `json:"_shards"` + // Version_ The document version, which is incremented each time the document is updated. + Version_ int64 `json:"_version"` } // NewResponse returns a Response diff --git a/typedapi/core/update/update.go b/typedapi/core/update/update.go index 64432df232..1227c049ef 100644 --- a/typedapi/core/update/update.go +++ b/typedapi/core/update/update.go @@ -16,10 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Update a document. -// Updates a document by running a script or passing a partial document. +// +// Update a document by running a script or passing a partial document. +// +// If the Elasticsearch security features are enabled, you must have the `index` +// or `write` index privilege for the target index or index alias. +// +// The script can update, delete, or skip modifying the document. +// The API also supports passing a partial document, which is merged into the +// existing document. +// To fully replace an existing document, use the index API. +// This operation: +// +// * Gets the document (collocated with the shard) from the index. +// * Runs the specified script. +// * Indexes the result. +// +// The document must still be reindexed, but using this API removes some network +// roundtrips and reduces chances of version conflicts between the GET and the +// index operation. +// +// The `_source` field must be enabled to use this API. +// In addition to `_source`, you can access the following variables through the +// `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the +// current timestamp). package update import ( @@ -89,7 +112,30 @@ func NewUpdateFunc(tp elastictransport.Interface) NewUpdate { } // Update a document. 
-// Updates a document by running a script or passing a partial document. +// +// Update a document by running a script or passing a partial document. +// +// If the Elasticsearch security features are enabled, you must have the `index` +// or `write` index privilege for the target index or index alias. +// +// The script can update, delete, or skip modifying the document. +// The API also supports passing a partial document, which is merged into the +// existing document. +// To fully replace an existing document, use the index API. +// This operation: +// +// * Gets the document (collocated with the shard) from the index. +// * Runs the specified script. +// * Indexes the result. +// +// The document must still be reindexed, but using this API removes some network +// roundtrips and reduces chances of version conflicts between the GET and the +// index operation. +// +// The `_source` field must be enabled to use this API. +// In addition to `_source`, you can access the following variables through the +// `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the +// current timestamp). // // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html func New(tp elastictransport.Interface) *Update { @@ -99,8 +145,6 @@ func New(tp elastictransport.Interface) *Update { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -318,7 +362,7 @@ func (r *Update) Header(key, value string) *Update { return r } -// Id Document ID +// Id A unique identifier for the document to be updated. // API Name: id func (r *Update) _id(id string) *Update { r.paramSet |= idMask @@ -327,7 +371,8 @@ func (r *Update) _id(id string) *Update { return r } -// Index The name of the index +// Index The name of the target index. +// By default, the index is created automatically if it doesn't exist. 
// API Name: index func (r *Update) _index(index string) *Update { r.paramSet |= indexMask @@ -352,6 +397,15 @@ func (r *Update) IfSeqNo(sequencenumber string) *Update { return r } +// IncludeSourceOnError True or false if to include the document source in the error message in case +// of parsing errors. +// API name: include_source_on_error +func (r *Update) IncludeSourceOnError(includesourceonerror bool) *Update { + r.values.Set("include_source_on_error", strconv.FormatBool(includesourceonerror)) + + return r +} + // Lang The script language. // API name: lang func (r *Update) Lang(lang string) *Update { @@ -361,9 +415,10 @@ func (r *Update) Lang(lang string) *Update { } // Refresh If 'true', Elasticsearch refreshes the affected shards to make this operation -// visible to search, if 'wait_for' then wait for a refresh to make this -// operation -// visible to search, if 'false' do nothing with refreshes. +// visible to search. +// If 'wait_for', it waits for a refresh to make this operation visible to +// search. +// If 'false', it does nothing with refreshes. // API name: refresh func (r *Update) Refresh(refresh refresh.Refresh) *Update { r.values.Set("refresh", refresh.String()) @@ -371,7 +426,7 @@ func (r *Update) Refresh(refresh refresh.Refresh) *Update { return r } -// RequireAlias If true, the destination must be an index alias. +// RequireAlias If `true`, the destination must be an index alias. // API name: require_alias func (r *Update) RequireAlias(requirealias bool) *Update { r.values.Set("require_alias", strconv.FormatBool(requirealias)) @@ -379,8 +434,7 @@ func (r *Update) RequireAlias(requirealias bool) *Update { return r } -// RetryOnConflict Specify how many times should the operation be retried when a conflict -// occurs. +// RetryOnConflict The number of times the operation should be retried when a conflict occurs. 
// API name: retry_on_conflict func (r *Update) RetryOnConflict(retryonconflict int) *Update { r.values.Set("retry_on_conflict", strconv.Itoa(retryonconflict)) @@ -388,7 +442,7 @@ func (r *Update) RetryOnConflict(retryonconflict int) *Update { return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *Update) Routing(routing string) *Update { r.values.Set("routing", routing) @@ -396,8 +450,9 @@ func (r *Update) Routing(routing string) *Update { return r } -// Timeout Period to wait for dynamic mapping updates and active shards. -// This guarantees Elasticsearch waits for at least the timeout before failing. +// Timeout The period to wait for the following operations: dynamic mapping updates and +// waiting for active shards. +// Elasticsearch waits for at least the timeout period before failing. // The actual wait time could be longer, particularly when multiple waits occur. // API name: timeout func (r *Update) Timeout(duration string) *Update { @@ -406,11 +461,11 @@ func (r *Update) Timeout(duration string) *Update { return r } -// WaitForActiveShards The number of shard copies that must be active before proceeding with the -// operations. +// WaitForActiveShards The number of copies of each shard that must be active before proceeding with +// the operation. // Set to 'all' or any positive integer up to the total number of shards in the -// index -// (number_of_replicas+1). Defaults to 1 meaning the primary shard. +// index (`number_of_replicas`+1). +// The default value of `1` means it waits for each primary shard to be active. 
// API name: wait_for_active_shards func (r *Update) WaitForActiveShards(waitforactiveshards string) *Update { r.values.Set("wait_for_active_shards", waitforactiveshards) @@ -418,7 +473,7 @@ func (r *Update) WaitForActiveShards(waitforactiveshards string) *Update { return r } -// SourceExcludes_ Specify the source fields you want to exclude. +// SourceExcludes_ The source fields you want to exclude. // API name: _source_excludes func (r *Update) SourceExcludes_(fields ...string) *Update { r.values.Set("_source_excludes", strings.Join(fields, ",")) @@ -426,7 +481,7 @@ func (r *Update) SourceExcludes_(fields ...string) *Update { return r } -// SourceIncludes_ Specify the source fields you want to retrieve. +// SourceIncludes_ The source fields you want to retrieve. // API name: _source_includes func (r *Update) SourceIncludes_(fields ...string) *Update { r.values.Set("_source_includes", strings.Join(fields, ",")) @@ -478,22 +533,28 @@ func (r *Update) Pretty(pretty bool) *Update { return r } -// DetectNoop Set to false to disable setting 'result' in the response -// to 'noop' if no change to the document occurred. +// If `true`, the `result` in the response is set to `noop` (no operation) when +// there are no changes to the document. // API name: detect_noop func (r *Update) DetectNoop(detectnoop bool) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DetectNoop = &detectnoop return r } -// Doc A partial update to an existing document. +// A partial update to an existing document. +// If both `doc` and `script` are specified, `doc` is ignored. // API name: doc -// -// doc should be a json.RawMessage or a structure -// if a structure is provided, the client will defer a json serialization -// prior to sending the payload to Elasticsearch. 
func (r *Update) Doc(doc any) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } switch casted := doc.(type) { case json.RawMessage: r.req.Doc = casted @@ -507,54 +568,73 @@ func (r *Update) Doc(doc any) *Update { return nil }) } - return r } -// DocAsUpsert Set to true to use the contents of 'doc' as the value of 'upsert' +// If `true`, use the contents of 'doc' as the value of 'upsert'. +// NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. // API name: doc_as_upsert func (r *Update) DocAsUpsert(docasupsert bool) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DocAsUpsert = &docasupsert return r } -// Script Script to execute to update the document. +// The script to run to update the document. // API name: script -func (r *Update) Script(script *types.Script) *Update { +func (r *Update) Script(script types.ScriptVariant) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Script = script + r.req.Script = script.ScriptCaster() return r } -// ScriptedUpsert Set to true to execute the script whether or not the document exists. +// If `true`, run the script whether or not the document exists. // API name: scripted_upsert func (r *Update) ScriptedUpsert(scriptedupsert bool) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ScriptedUpsert = &scriptedupsert return r } -// Source_ Set to false to disable source retrieval. You can also specify a -// comma-separated -// list of the fields you want to retrieve. +// If `false`, turn off source retrieval. +// You can also specify a comma-separated list of the fields you want to +// retrieve. 
// API name: _source -func (r *Update) Source_(sourceconfig types.SourceConfig) *Update { - r.req.Source_ = sourceconfig +func (r *Update) Source_(sourceconfig types.SourceConfigVariant) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source_ = *sourceconfig.SourceConfigCaster() return r } -// Upsert If the document does not already exist, the contents of 'upsert' are inserted -// as a -// new document. If the document exists, the 'script' is executed. +// If the document does not already exist, the contents of 'upsert' are inserted +// as a new document. +// If the document exists, the 'script' is run. // API name: upsert -// -// upsert should be a json.RawMessage or a structure -// if a structure is provided, the client will defer a json serialization -// prior to sending the payload to Elasticsearch. func (r *Update) Upsert(upsert any) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } switch casted := upsert.(type) { case json.RawMessage: r.req.Upsert = casted @@ -568,6 +648,5 @@ func (r *Update) Upsert(upsert any) *Update { return nil }) } - return r } diff --git a/typedapi/core/updatebyquery/request.go b/typedapi/core/updatebyquery/request.go index 9798f42605..c09a363a2e 100644 --- a/typedapi/core/updatebyquery/request.go +++ b/typedapi/core/updatebyquery/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatebyquery @@ -30,14 +30,15 @@ import ( // Request holds the request body struct for the package updatebyquery // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/update_by_query/UpdateByQueryRequest.ts#L37-L222 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/update_by_query/UpdateByQueryRequest.ts#L37-L336 type Request struct { - // Conflicts What to do if update by query hits version conflicts: `abort` or `proceed`. + // Conflicts The preferred behavior when update by query hits version conflicts: `abort` + // or `proceed`. Conflicts *conflicts.Conflicts `json:"conflicts,omitempty"` // MaxDocs The maximum number of documents to update. MaxDocs *int64 `json:"max_docs,omitempty"` - // Query Specifies the documents to update using the Query DSL. + // Query The documents to update using the Query DSL. Query *types.Query `json:"query,omitempty"` // Script The script to run to update the document source or metadata when updating. Script *types.Script `json:"script,omitempty"` diff --git a/typedapi/core/updatebyquery/response.go b/typedapi/core/updatebyquery/response.go index 71f56d568a..59f286760f 100644 --- a/typedapi/core/updatebyquery/response.go +++ b/typedapi/core/updatebyquery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatebyquery @@ -26,24 +26,51 @@ import ( // Response holds the response body struct for the package updatebyquery // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/update_by_query/UpdateByQueryResponse.ts#L26-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/update_by_query/UpdateByQueryResponse.ts#L26-L67 type Response struct { - Batches *int64 `json:"batches,omitempty"` - Deleted *int64 `json:"deleted,omitempty"` - Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` - Noops *int64 `json:"noops,omitempty"` - RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` - Retries *types.Retries `json:"retries,omitempty"` - Task types.TaskId `json:"task,omitempty"` - Throttled types.Duration `json:"throttled,omitempty"` - ThrottledMillis *int64 `json:"throttled_millis,omitempty"` - ThrottledUntil types.Duration `json:"throttled_until,omitempty"` - ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` - TimedOut *bool `json:"timed_out,omitempty"` - Took *int64 `json:"took,omitempty"` - Total *int64 `json:"total,omitempty"` - Updated *int64 `json:"updated,omitempty"` - VersionConflicts *int64 `json:"version_conflicts,omitempty"` + + // Batches The number of scroll responses pulled back by the update by query. + Batches *int64 `json:"batches,omitempty"` + // Deleted The number of documents that were successfully deleted. + Deleted *int64 `json:"deleted,omitempty"` + // Failures Array of failures if there were any unrecoverable errors during the process. + // If this is non-empty then the request ended because of those failures. 
+ // Update by query is implemented using batches. + // Any failure causes the entire process to end, but all failures in the current + // batch are collected into the array. + // You can use the `conflicts` option to prevent reindex from ending when + // version conflicts occur. + Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` + // Noops The number of documents that were ignored because the script used for the + // update by query returned a noop value for `ctx.op`. + Noops *int64 `json:"noops,omitempty"` + // RequestsPerSecond The number of requests per second effectively run during the update by query. + RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` + // Retries The number of retries attempted by update by query. + // `bulk` is the number of bulk actions retried. + // `search` is the number of search actions retried. + Retries *types.Retries `json:"retries,omitempty"` + Task types.TaskId `json:"task,omitempty"` + Throttled types.Duration `json:"throttled,omitempty"` + // ThrottledMillis The number of milliseconds the request slept to conform to + // `requests_per_second`. + ThrottledMillis *int64 `json:"throttled_millis,omitempty"` + ThrottledUntil types.Duration `json:"throttled_until,omitempty"` + // ThrottledUntilMillis This field should always be equal to zero in an _update_by_query response. + // It only has meaning when using the task API, where it indicates the next time + // (in milliseconds since epoch) a throttled request will be run again in order + // to conform to `requests_per_second`. + ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` + // TimedOut If true, some requests timed out during the update by query. + TimedOut *bool `json:"timed_out,omitempty"` + // Took The number of milliseconds from start to end of the whole operation. + Took *int64 `json:"took,omitempty"` + // Total The number of documents that were successfully processed. 
+ Total *int64 `json:"total,omitempty"` + // Updated The number of documents that were successfully updated. + Updated *int64 `json:"updated,omitempty"` + // VersionConflicts The number of version conflicts that the update by query hit. + VersionConflicts *int64 `json:"version_conflicts,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/core/updatebyquery/update_by_query.go b/typedapi/core/updatebyquery/update_by_query.go index e4f65cafe8..c49c72b876 100644 --- a/typedapi/core/updatebyquery/update_by_query.go +++ b/typedapi/core/updatebyquery/update_by_query.go @@ -16,13 +16,145 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Update documents. // Updates documents that match the specified query. // If no query is specified, performs an update on every document in the data // stream or index without modifying the source, which is useful for picking up // mapping changes. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or alias: +// +// * `read` +// * `index` or `write` +// +// You can specify the query criteria in the request URI or the request body +// using the same syntax as the search API. +// +// When you submit an update by query request, Elasticsearch gets a snapshot of +// the data stream or index when it begins processing the request and updates +// matching documents using internal versioning. +// When the versions match, the document is updated and the version number is +// incremented. +// If a document changes between the time that the snapshot is taken and the +// update operation is processed, it results in a version conflict and the +// operation fails. 
+// You can opt to count version conflicts instead of halting and returning by +// setting `conflicts` to `proceed`. +// Note that if you opt to count version conflicts, the operation could attempt +// to update more documents from the source than `max_docs` until it has +// successfully updated `max_docs` documents or it has gone through every +// document in the source query. +// +// NOTE: Documents with a version equal to 0 cannot be updated using update by +// query because internal versioning does not support 0 as a valid version +// number. +// +// While processing an update by query request, Elasticsearch performs multiple +// search requests sequentially to find all of the matching documents. +// A bulk update request is performed for each batch of matching documents. +// Any query or update failures cause the update by query request to fail and +// the failures are shown in the response. +// Any update requests that completed successfully still stick, they are not +// rolled back. +// +// **Throttling update requests** +// +// To control the rate at which update by query issues batches of update +// operations, you can set `requests_per_second` to any positive decimal number. +// This pads each batch with a wait time to throttle the rate. +// Set `requests_per_second` to `-1` to turn off throttling. +// +// Throttling uses a wait time between batches so that the internal scroll +// requests can be given a timeout that takes the request padding into account. +// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. 
+// By default the batch size is 1000, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single _bulk request, large batch sizes cause +// Elasticsearch to create many requests and wait before starting the next set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Update by query supports sliced scroll to parallelize the update process. +// This can improve efficiency and provide a convenient way to break the request +// down into smaller parts. +// +// Setting `slices` to `auto` chooses a reasonable number for most data streams +// and indices. +// This setting will use one slice per shard, up to a certain limit. +// If there are multiple source data streams or indices, it will choose the +// number of slices based on the index or backing index with the smallest number +// of shards. +// +// Adding `slices` to `_update_by_query` just automates the manual process of +// creating sub-requests, which means it has some quirks: +// +// * You can see these requests in the tasks APIs. These sub-requests are +// "child" tasks of the task for the request with slices. +// * Fetching the status of the task for the request with `slices` only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with slices will cancel each sub-request. +// * Due to the nature of slices each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. 
+// * Parameters like `requests_per_second` and `max_docs` on a request with +// slices are distributed proportionally to each sub-request. Combine that with +// the point above about distribution being uneven and you should conclude that +// using `max_docs` with `slices` might not result in exactly `max_docs` +// documents being updated. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. +// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many slices hurts +// performance. Setting slices higher than the number of shards generally does +// not improve efficiency and adds overhead. +// * Update performance scales linearly across available resources with the +// number of slices. +// +// Whether query or update performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Update the document source** +// +// Update by query supports scripts to update the document source. +// As with the update API, you can set `ctx.op` to change the operation that is +// performed. +// +// Set `ctx.op = "noop"` if your script decides that it doesn't have to make any +// changes. +// The update by query operation skips updating the document and increments the +// `noop` counter. +// +// Set `ctx.op = "delete"` if your script decides that the document should be +// deleted. +// The update by query operation deletes the document and increments the +// `deleted` counter. +// +// Update by query supports only `index`, `noop`, and `delete`. +// Setting `ctx.op` to anything else is an error. +// Setting any other field in `ctx` is an error. 
+// This API enables you to only modify the source of matching documents; you +// cannot move them. package updatebyquery import ( @@ -95,6 +227,138 @@ func NewUpdateByQueryFunc(tp elastictransport.Interface) NewUpdateByQuery { // stream or index without modifying the source, which is useful for picking up // mapping changes. // +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or alias: +// +// * `read` +// * `index` or `write` +// +// You can specify the query criteria in the request URI or the request body +// using the same syntax as the search API. +// +// When you submit an update by query request, Elasticsearch gets a snapshot of +// the data stream or index when it begins processing the request and updates +// matching documents using internal versioning. +// When the versions match, the document is updated and the version number is +// incremented. +// If a document changes between the time that the snapshot is taken and the +// update operation is processed, it results in a version conflict and the +// operation fails. +// You can opt to count version conflicts instead of halting and returning by +// setting `conflicts` to `proceed`. +// Note that if you opt to count version conflicts, the operation could attempt +// to update more documents from the source than `max_docs` until it has +// successfully updated `max_docs` documents or it has gone through every +// document in the source query. +// +// NOTE: Documents with a version equal to 0 cannot be updated using update by +// query because internal versioning does not support 0 as a valid version +// number. +// +// While processing an update by query request, Elasticsearch performs multiple +// search requests sequentially to find all of the matching documents. +// A bulk update request is performed for each batch of matching documents. 
+// Any query or update failures cause the update by query request to fail and +// the failures are shown in the response. +// Any update requests that completed successfully still stick, they are not +// rolled back. +// +// **Throttling update requests** +// +// To control the rate at which update by query issues batches of update +// operations, you can set `requests_per_second` to any positive decimal number. +// This pads each batch with a wait time to throttle the rate. +// Set `requests_per_second` to `-1` to turn off throttling. +// +// Throttling uses a wait time between batches so that the internal scroll +// requests can be given a timeout that takes the request padding into account. +// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. +// By default the batch size is 1000, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single _bulk request, large batch sizes cause +// Elasticsearch to create many requests and wait before starting the next set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Update by query supports sliced scroll to parallelize the update process. +// This can improve efficiency and provide a convenient way to break the request +// down into smaller parts. +// +// Setting `slices` to `auto` chooses a reasonable number for most data streams +// and indices. +// This setting will use one slice per shard, up to a certain limit. +// If there are multiple source data streams or indices, it will choose the +// number of slices based on the index or backing index with the smallest number +// of shards. 
+// +// Adding `slices` to `_update_by_query` just automates the manual process of +// creating sub-requests, which means it has some quirks: +// +// * You can see these requests in the tasks APIs. These sub-requests are +// "child" tasks of the task for the request with slices. +// * Fetching the status of the task for the request with `slices` only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with slices will cancel each sub-request. +// * Due to the nature of slices each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// slices are distributed proportionally to each sub-request. Combine that with +// the point above about distribution being uneven and you should conclude that +// using `max_docs` with `slices` might not result in exactly `max_docs` +// documents being updated. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. +// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many slices hurts +// performance. Setting slices higher than the number of shards generally does +// not improve efficiency and adds overhead. +// * Update performance scales linearly across available resources with the +// number of slices. 
+// +// Whether query or update performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Update the document source** +// +// Update by query supports scripts to update the document source. +// As with the update API, you can set `ctx.op` to change the operation that is +// performed. +// +// Set `ctx.op = "noop"` if your script decides that it doesn't have to make any +// changes. +// The update by query operation skips updating the document and increments the +// `noop` counter. +// +// Set `ctx.op = "delete"` if your script decides that the document should be +// deleted. +// The update by query operation deletes the document and increments the +// `deleted` counter. +// +// Update by query supports only `index`, `noop`, and `delete`. +// Setting `ctx.op` to anything else is an error. +// Setting any other field in `ctx` is an error. +// This API enables you to only modify the source of matching documents; you +// cannot move them. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html func New(tp elastictransport.Interface) *UpdateByQuery { r := &UpdateByQuery{ @@ -103,8 +367,6 @@ func New(tp elastictransport.Interface) *UpdateByQuery { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -316,8 +578,8 @@ func (r *UpdateByQuery) Header(key, value string) *UpdateByQuery { return r } -// Index Comma-separated list of data streams, indices, and aliases to search. -// Supports wildcards (`*`). +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). // To search all data streams or indices, omit this parameter or use `*` or // `_all`. // API Name: index @@ -340,7 +602,9 @@ func (r *UpdateByQuery) AllowNoIndices(allownoindices bool) *UpdateByQuery { return r } -// Analyzer Analyzer to use for the query string. 
+// Analyzer The analyzer to use for the query string. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: analyzer func (r *UpdateByQuery) Analyzer(analyzer string) *UpdateByQuery { r.values.Set("analyzer", analyzer) @@ -349,6 +613,8 @@ func (r *UpdateByQuery) Analyzer(analyzer string) *UpdateByQuery { } // AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: analyze_wildcard func (r *UpdateByQuery) AnalyzeWildcard(analyzewildcard bool) *UpdateByQuery { r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) @@ -357,6 +623,8 @@ func (r *UpdateByQuery) AnalyzeWildcard(analyzewildcard bool) *UpdateByQuery { } // DefaultOperator The default operator for query string query: `AND` or `OR`. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: default_operator func (r *UpdateByQuery) DefaultOperator(defaultoperator operator.Operator) *UpdateByQuery { r.values.Set("default_operator", defaultoperator.String()) @@ -364,7 +632,10 @@ func (r *UpdateByQuery) DefaultOperator(defaultoperator operator.Operator) *Upda return r } -// Df Field to use as default where no field prefix is given in the query string. +// Df The field to use as default where no field prefix is given in the query +// string. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: df func (r *UpdateByQuery) Df(df string) *UpdateByQuery { r.values.Set("df", df) @@ -372,10 +643,10 @@ func (r *UpdateByQuery) Df(df string) *UpdateByQuery { return r } -// ExpandWildcards Type of index that wildcard patterns can match. +// ExpandWildcards The type of index that wildcard patterns can match. // If the request can target data streams, this argument determines whether // wildcard expressions match hidden data streams. 
-// Supports comma-separated values, such as `open,hidden`. +// It supports comma-separated values, such as `open,hidden`. // Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards func (r *UpdateByQuery) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *UpdateByQuery { @@ -407,6 +678,8 @@ func (r *UpdateByQuery) IgnoreUnavailable(ignoreunavailable bool) *UpdateByQuery // Lenient If `true`, format-based query failures (such as providing text to a numeric // field) in the query string will be ignored. +// This parameter can be used only when the `q` query string parameter is +// specified. // API name: lenient func (r *UpdateByQuery) Lenient(lenient bool) *UpdateByQuery { r.values.Set("lenient", strconv.FormatBool(lenient)) @@ -414,7 +687,7 @@ func (r *UpdateByQuery) Lenient(lenient bool) *UpdateByQuery { return r } -// Pipeline ID of the pipeline to use to preprocess incoming documents. +// Pipeline The ID of the pipeline to use to preprocess incoming documents. // If the index has a default ingest pipeline specified, then setting the value // to `_none` disables the default ingest pipeline for this request. // If a final pipeline is configured it will always run, regardless of the value @@ -426,8 +699,8 @@ func (r *UpdateByQuery) Pipeline(pipeline string) *UpdateByQuery { return r } -// Preference Specifies the node or shard the operation should be performed on. -// Random by default. +// Preference The node or shard the operation should be performed on. +// It is random by default. // API name: preference func (r *UpdateByQuery) Preference(preference string) *UpdateByQuery { r.values.Set("preference", preference) @@ -435,8 +708,18 @@ func (r *UpdateByQuery) Preference(preference string) *UpdateByQuery { return r } +// Q A query in the Lucene query string syntax. 
+// API name: q +func (r *UpdateByQuery) Q(q string) *UpdateByQuery { + r.values.Set("q", q) + + return r +} + // Refresh If `true`, Elasticsearch refreshes affected shards to make the operation -// visible to search. +// visible to search after the request completes. +// This is different than the update API's `refresh` parameter, which causes +// just the shard that received the request to be refreshed. // API name: refresh func (r *UpdateByQuery) Refresh(refresh bool) *UpdateByQuery { r.values.Set("refresh", strconv.FormatBool(refresh)) @@ -445,6 +728,7 @@ func (r *UpdateByQuery) Refresh(refresh bool) *UpdateByQuery { } // RequestCache If `true`, the request cache is used for this request. +// It defaults to the index-level setting. // API name: request_cache func (r *UpdateByQuery) RequestCache(requestcache bool) *UpdateByQuery { r.values.Set("request_cache", strconv.FormatBool(requestcache)) @@ -460,7 +744,7 @@ func (r *UpdateByQuery) RequestsPerSecond(requestspersecond string) *UpdateByQue return r } -// Routing Custom value used to route operations to a specific shard. +// Routing A custom value used to route operations to a specific shard. // API name: routing func (r *UpdateByQuery) Routing(routing string) *UpdateByQuery { r.values.Set("routing", routing) @@ -468,7 +752,7 @@ func (r *UpdateByQuery) Routing(routing string) *UpdateByQuery { return r } -// Scroll Period to retain the search context for scrolling. +// Scroll The period to retain the search context for scrolling. // API name: scroll func (r *UpdateByQuery) Scroll(duration string) *UpdateByQuery { r.values.Set("scroll", duration) @@ -476,7 +760,7 @@ func (r *UpdateByQuery) Scroll(duration string) *UpdateByQuery { return r } -// ScrollSize Size of the scroll request that powers the operation. +// ScrollSize The size of the scroll request that powers the operation. 
// API name: scroll_size func (r *UpdateByQuery) ScrollSize(scrollsize string) *UpdateByQuery { r.values.Set("scroll_size", scrollsize) @@ -484,7 +768,8 @@ func (r *UpdateByQuery) ScrollSize(scrollsize string) *UpdateByQuery { return r } -// SearchTimeout Explicit timeout for each search request. +// SearchTimeout An explicit timeout for each search request. +// By default, there is no timeout. // API name: search_timeout func (r *UpdateByQuery) SearchTimeout(duration string) *UpdateByQuery { r.values.Set("search_timeout", duration) @@ -492,8 +777,8 @@ func (r *UpdateByQuery) SearchTimeout(duration string) *UpdateByQuery { return r } -// SearchType The type of the search operation. Available options: `query_then_fetch`, -// `dfs_query_then_fetch`. +// SearchType The type of the search operation. Available options include +// `query_then_fetch` and `dfs_query_then_fetch`. // API name: search_type func (r *UpdateByQuery) SearchType(searchtype searchtype.SearchType) *UpdateByQuery { r.values.Set("search_type", searchtype.String()) @@ -521,7 +806,7 @@ func (r *UpdateByQuery) Sort(sorts ...string) *UpdateByQuery { return r } -// Stats Specific `tag` of the request for logging and statistical purposes. +// Stats The specific `tag` of the request for logging and statistical purposes. // API name: stats func (r *UpdateByQuery) Stats(stats ...string) *UpdateByQuery { tmp := []string{} @@ -533,10 +818,11 @@ func (r *UpdateByQuery) Stats(stats ...string) *UpdateByQuery { return r } -// TerminateAfter Maximum number of documents to collect for each shard. +// TerminateAfter The maximum number of documents to collect for each shard. // If a query reaches this limit, Elasticsearch terminates the query early. // Elasticsearch collects documents before sorting. -// Use with caution. +// +// IMPORTANT: Use with caution. // Elasticsearch applies this parameter to each shard handling the request. // When possible, let Elasticsearch perform early termination automatically. 
// Avoid specifying this parameter for requests that target data streams with @@ -548,8 +834,11 @@ func (r *UpdateByQuery) TerminateAfter(terminateafter string) *UpdateByQuery { return r } -// Timeout Period each update request waits for the following operations: dynamic +// Timeout The period each update request waits for the following operations: dynamic // mapping updates, waiting for active shards. +// By default, it is one minute. +// This guarantees Elasticsearch waits for at least the timeout before failing. +// The actual wait time could be longer, particularly when multiple waits occur. // API name: timeout func (r *UpdateByQuery) Timeout(duration string) *UpdateByQuery { r.values.Set("timeout", duration) @@ -578,6 +867,9 @@ func (r *UpdateByQuery) VersionType(versiontype bool) *UpdateByQuery { // operation. // Set to `all` or any positive integer up to the total number of shards in the // index (`number_of_replicas+1`). +// The `timeout` parameter controls how long each write request waits for +// unavailable shards to become available. +// Both work exactly the way they work in the bulk API. // API name: wait_for_active_shards func (r *UpdateByQuery) WaitForActiveShards(waitforactiveshards string) *UpdateByQuery { r.values.Set("wait_for_active_shards", waitforactiveshards) @@ -586,6 +878,11 @@ func (r *UpdateByQuery) WaitForActiveShards(waitforactiveshards string) *UpdateB } // WaitForCompletion If `true`, the request blocks until the operation is complete. +// If `false`, Elasticsearch performs some preflight checks, launches the +// request, and returns a task ID that you can use to cancel or get the status +// of the task. +// Elasticsearch creates a record of this task as a document at +// `.tasks/task/${taskId}`. 
// API name: wait_for_completion func (r *UpdateByQuery) WaitForCompletion(waitforcompletion bool) *UpdateByQuery { r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) @@ -637,47 +934,67 @@ func (r *UpdateByQuery) Pretty(pretty bool) *UpdateByQuery { return r } -// Conflicts What to do if update by query hits version conflicts: `abort` or `proceed`. +// The preferred behavior when update by query hits version conflicts: `abort` +// or `proceed`. // API name: conflicts func (r *UpdateByQuery) Conflicts(conflicts conflicts.Conflicts) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Conflicts = &conflicts - return r } -// MaxDocs The maximum number of documents to update. +// The maximum number of documents to update. // API name: max_docs func (r *UpdateByQuery) MaxDocs(maxdocs int64) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxDocs = &maxdocs return r } -// Query Specifies the documents to update using the Query DSL. +// The documents to update using the Query DSL. // API name: query -func (r *UpdateByQuery) Query(query *types.Query) *UpdateByQuery { +func (r *UpdateByQuery) Query(query types.QueryVariant) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// Script The script to run to update the document source or metadata when updating. +// The script to run to update the document source or metadata when updating. 
// API name: script -func (r *UpdateByQuery) Script(script *types.Script) *UpdateByQuery { +func (r *UpdateByQuery) Script(script types.ScriptVariant) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Script = script + r.req.Script = script.ScriptCaster() return r } -// Slice Slice the request manually using the provided slice ID and total number of +// Slice the request manually using the provided slice ID and total number of // slices. // API name: slice -func (r *UpdateByQuery) Slice(slice *types.SlicedScroll) *UpdateByQuery { +func (r *UpdateByQuery) Slice(slice types.SlicedScrollVariant) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Slice = slice + r.req.Slice = slice.SlicedScrollCaster() return r } diff --git a/typedapi/core/updatebyqueryrethrottle/response.go b/typedapi/core/updatebyqueryrethrottle/response.go index 2311c0fbc2..84beb3d8b4 100644 --- a/typedapi/core/updatebyqueryrethrottle/response.go +++ b/typedapi/core/updatebyqueryrethrottle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatebyqueryrethrottle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatebyqueryrethrottle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleResponse.ts#L23-L25 type Response struct { Nodes map[string]types.UpdateByQueryRethrottleNode `json:"nodes"` } diff --git a/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go b/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go index 80293e8a26..539654e0c6 100644 --- a/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go +++ b/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go @@ -16,10 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Changes the number of requests per second for a particular Update By Query +// Throttle an update by query operation. +// +// Change the number of requests per second for a particular update by query +// operation. +// Rethrottling that speeds up the query takes effect immediately but +// rethrottling that slows down the query takes effect after completing the +// current batch to prevent scroll timeouts. 
package updatebyqueryrethrottle import ( @@ -77,10 +82,15 @@ func NewUpdateByQueryRethrottleFunc(tp elastictransport.Interface) NewUpdateByQu } } -// Changes the number of requests per second for a particular Update By Query +// Throttle an update by query operation. +// +// Change the number of requests per second for a particular update by query // operation. +// Rethrottling that speeds up the query takes effect immediately but +// rethrottling that slows down the query takes effect after completing the +// current batch to prevent scroll timeouts. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html#docs-update-by-query-rethrottle func New(tp elastictransport.Interface) *UpdateByQueryRethrottle { r := &UpdateByQueryRethrottle{ transport: tp, @@ -302,6 +312,7 @@ func (r *UpdateByQueryRethrottle) _taskid(taskid string) *UpdateByQueryRethrottl } // RequestsPerSecond The throttle for this request in sub-requests per second. +// To turn off throttling, set it to `-1`. // API name: requests_per_second func (r *UpdateByQueryRethrottle) RequestsPerSecond(requestspersecond string) *UpdateByQueryRethrottle { r.values.Set("requests_per_second", requestspersecond) diff --git a/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go b/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go index 0d6b48791d..8a417549cc 100644 --- a/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go +++ b/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go @@ -16,9 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Deletes the specified dangling index +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Delete a dangling index. +// If Elasticsearch encounters index data that is absent from the current +// cluster state, those indices are considered to be dangling. +// For example, this can happen if you delete more than +// `cluster.indices.tombstones.size` indices while an Elasticsearch node is +// offline. package deletedanglingindex import ( @@ -76,9 +81,14 @@ func NewDeleteDanglingIndexFunc(tp elastictransport.Interface) NewDeleteDangling } } -// Deletes the specified dangling index +// Delete a dangling index. +// If Elasticsearch encounters index data that is absent from the current +// cluster state, those indices are considered to be dangling. +// For example, this can happen if you delete more than +// `cluster.indices.tombstones.size` indices while an Elasticsearch node is +// offline. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/dangling-index-delete.html func New(tp elastictransport.Interface) *DeleteDanglingIndex { r := &DeleteDanglingIndex{ transport: tp, @@ -288,7 +298,8 @@ func (r *DeleteDanglingIndex) Header(key, value string) *DeleteDanglingIndex { return r } -// IndexUuid The UUID of the dangling index +// IndexUuid The UUID of the index to delete. Use the get dangling indices API to find the +// UUID. 
// API Name: indexuuid func (r *DeleteDanglingIndex) _indexuuid(indexuuid string) *DeleteDanglingIndex { r.paramSet |= indexuuidMask @@ -297,7 +308,8 @@ func (r *DeleteDanglingIndex) _indexuuid(indexuuid string) *DeleteDanglingIndex return r } -// AcceptDataLoss Must be set to true in order to delete the dangling index +// AcceptDataLoss This parameter must be set to true to acknowledge that it will no longer be +// possible to recover data from the dangling index. // API name: accept_data_loss func (r *DeleteDanglingIndex) AcceptDataLoss(acceptdataloss bool) *DeleteDanglingIndex { r.values.Set("accept_data_loss", strconv.FormatBool(acceptdataloss)) diff --git a/typedapi/danglingindices/deletedanglingindex/response.go b/typedapi/danglingindices/deletedanglingindex/response.go index 7063cecc10..f9a4df42db 100644 --- a/typedapi/danglingindices/deletedanglingindex/response.go +++ b/typedapi/danglingindices/deletedanglingindex/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletedanglingindex // Response holds the response body struct for the package deletedanglingindex // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/dangling_indices/delete_dangling_index/DeleteDanglingIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/dangling_indices/delete_dangling_index/DeleteDanglingIndexResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/danglingindices/importdanglingindex/import_dangling_index.go b/typedapi/danglingindices/importdanglingindex/import_dangling_index.go index d0a0f8b4ac..9241edc641 100644 --- a/typedapi/danglingindices/importdanglingindex/import_dangling_index.go +++ b/typedapi/danglingindices/importdanglingindex/import_dangling_index.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Imports the specified dangling index +// Import a dangling index. +// +// If Elasticsearch encounters index data that is absent from the current +// cluster state, those indices are considered to be dangling. +// For example, this can happen if you delete more than +// `cluster.indices.tombstones.size` indices while an Elasticsearch node is +// offline. package importdanglingindex import ( @@ -76,9 +82,15 @@ func NewImportDanglingIndexFunc(tp elastictransport.Interface) NewImportDangling } } -// Imports the specified dangling index +// Import a dangling index. +// +// If Elasticsearch encounters index data that is absent from the current +// cluster state, those indices are considered to be dangling. +// For example, this can happen if you delete more than +// `cluster.indices.tombstones.size` indices while an Elasticsearch node is +// offline. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/dangling-index-import.html func New(tp elastictransport.Interface) *ImportDanglingIndex { r := &ImportDanglingIndex{ transport: tp, @@ -288,7 +300,8 @@ func (r *ImportDanglingIndex) Header(key, value string) *ImportDanglingIndex { return r } -// IndexUuid The UUID of the dangling index +// IndexUuid The UUID of the index to import. Use the get dangling indices API to locate +// the UUID. // API Name: indexuuid func (r *ImportDanglingIndex) _indexuuid(indexuuid string) *ImportDanglingIndex { r.paramSet |= indexuuidMask @@ -297,7 +310,11 @@ func (r *ImportDanglingIndex) _indexuuid(indexuuid string) *ImportDanglingIndex return r } -// AcceptDataLoss Must be set to true in order to import the dangling index +// AcceptDataLoss This parameter must be set to true to import a dangling index. +// Because Elasticsearch cannot know where the dangling index data came from or +// determine which shard copies are fresh and which are stale, it cannot +// guarantee that the imported data represents the latest state of the index +// when it was last in the cluster. // API name: accept_data_loss func (r *ImportDanglingIndex) AcceptDataLoss(acceptdataloss bool) *ImportDanglingIndex { r.values.Set("accept_data_loss", strconv.FormatBool(acceptdataloss)) diff --git a/typedapi/danglingindices/importdanglingindex/response.go b/typedapi/danglingindices/importdanglingindex/response.go index e0bdc2bc50..15a0aabc92 100644 --- a/typedapi/danglingindices/importdanglingindex/response.go +++ b/typedapi/danglingindices/importdanglingindex/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package importdanglingindex // Response holds the response body struct for the package importdanglingindex // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/dangling_indices/import_dangling_index/ImportDanglingIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/dangling_indices/import_dangling_index/ImportDanglingIndexResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go b/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go index b01814a660..234353559a 100644 --- a/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go +++ b/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go @@ -16,9 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns all dangling indices. +// Get the dangling indices. +// +// If Elasticsearch encounters index data that is absent from the current +// cluster state, those indices are considered to be dangling. +// For example, this can happen if you delete more than +// `cluster.indices.tombstones.size` indices while an Elasticsearch node is +// offline. +// +// Use this API to list dangling indices, which you can then import or delete. 
package listdanglingindices import ( @@ -68,9 +76,17 @@ func NewListDanglingIndicesFunc(tp elastictransport.Interface) NewListDanglingIn } } -// Returns all dangling indices. +// Get the dangling indices. +// +// If Elasticsearch encounters index data that is absent from the current +// cluster state, those indices are considered to be dangling. +// For example, this can happen if you delete more than +// `cluster.indices.tombstones.size` indices while an Elasticsearch node is +// offline. +// +// Use this API to list dangling indices, which you can then import or delete. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/dangling-indices-list.html func New(tp elastictransport.Interface) *ListDanglingIndices { r := &ListDanglingIndices{ transport: tp, diff --git a/typedapi/danglingindices/listdanglingindices/response.go b/typedapi/danglingindices/listdanglingindices/response.go index f02ad3c28f..7bf3db46e6 100644 --- a/typedapi/danglingindices/listdanglingindices/response.go +++ b/typedapi/danglingindices/listdanglingindices/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package listdanglingindices @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package listdanglingindices // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L23-L27 type Response struct { DanglingIndices []types.DanglingIndex `json:"dangling_indices"` } diff --git a/typedapi/enrich/deletepolicy/delete_policy.go b/typedapi/enrich/deletepolicy/delete_policy.go index f6279f635e..b5f290404e 100644 --- a/typedapi/enrich/deletepolicy/delete_policy.go +++ b/typedapi/enrich/deletepolicy/delete_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete an enrich policy. // Deletes an existing enrich policy and its enrich index. @@ -301,6 +301,14 @@ func (r *DeletePolicy) _name(name string) *DeletePolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *DeletePolicy) MasterTimeout(duration string) *DeletePolicy { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace diff --git a/typedapi/enrich/deletepolicy/response.go b/typedapi/enrich/deletepolicy/response.go index 4e9e9cdfc7..c3895f5893 100644 --- a/typedapi/enrich/deletepolicy/response.go +++ b/typedapi/enrich/deletepolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletepolicy // Response holds the response body struct for the package deletepolicy // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/delete_policy/DeleteEnrichPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/delete_policy/DeleteEnrichPolicyResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/enrich/executepolicy/execute_policy.go b/typedapi/enrich/executepolicy/execute_policy.go index 5745e0cd09..47e9978dc9 100644 --- a/typedapi/enrich/executepolicy/execute_policy.go +++ b/typedapi/enrich/executepolicy/execute_policy.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates the enrich index for an existing enrich policy. +// Run an enrich policy. +// Create the enrich index for an existing enrich policy. 
package executepolicy import ( @@ -76,7 +77,8 @@ func NewExecutePolicyFunc(tp elastictransport.Interface) NewExecutePolicy { } } -// Creates the enrich index for an existing enrich policy. +// Run an enrich policy. +// Create the enrich index for an existing enrich policy. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/execute-enrich-policy-api.html func New(tp elastictransport.Interface) *ExecutePolicy { @@ -301,6 +303,14 @@ func (r *ExecutePolicy) _name(name string) *ExecutePolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *ExecutePolicy) MasterTimeout(duration string) *ExecutePolicy { + r.values.Set("master_timeout", duration) + + return r +} + // WaitForCompletion If `true`, the request blocks other enrich policy execution requests until // complete. // API name: wait_for_completion diff --git a/typedapi/enrich/executepolicy/response.go b/typedapi/enrich/executepolicy/response.go index 0054ed1819..408b018b6f 100644 --- a/typedapi/enrich/executepolicy/response.go +++ b/typedapi/enrich/executepolicy/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package executepolicy @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package executepolicy // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/execute_policy/ExecuteEnrichPolicyResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/execute_policy/ExecuteEnrichPolicyResponse.ts#L23-L28 type Response struct { Status *types.ExecuteEnrichPolicyStatus `json:"status,omitempty"` TaskId types.TaskId `json:"task_id,omitempty"` diff --git a/typedapi/enrich/getpolicy/get_policy.go b/typedapi/enrich/getpolicy/get_policy.go index 5fb660ad17..a8a7a3f158 100644 --- a/typedapi/enrich/getpolicy/get_policy.go +++ b/typedapi/enrich/getpolicy/get_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get an enrich policy. // Returns information about an enrich policy. @@ -307,6 +307,14 @@ func (r *GetPolicy) Name(name string) *GetPolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *GetPolicy) MasterTimeout(duration string) *GetPolicy { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace diff --git a/typedapi/enrich/getpolicy/response.go b/typedapi/enrich/getpolicy/response.go index b6819e6a14..fab4e7fb1d 100644 --- a/typedapi/enrich/getpolicy/response.go +++ b/typedapi/enrich/getpolicy/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getpolicy @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/get_policy/GetEnrichPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/get_policy/GetEnrichPolicyResponse.ts#L22-L24 type Response struct { Policies []types.Summary `json:"policies"` } diff --git a/typedapi/enrich/putpolicy/put_policy.go b/typedapi/enrich/putpolicy/put_policy.go index de4638ba44..79a528145b 100644 --- a/typedapi/enrich/putpolicy/put_policy.go +++ b/typedapi/enrich/putpolicy/put_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Create an enrich policy. // Creates an enrich policy. 
@@ -93,8 +93,6 @@ func New(tp elastictransport.Interface) *PutPolicy { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -317,6 +315,14 @@ func (r *PutPolicy) _name(name string) *PutPolicy { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PutPolicy) MasterTimeout(duration string) *PutPolicy { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -361,30 +367,42 @@ func (r *PutPolicy) Pretty(pretty bool) *PutPolicy { return r } -// GeoMatch Matches enrich data to incoming documents based on a `geo_shape` query. +// Matches enrich data to incoming documents based on a `geo_shape` query. // API name: geo_match -func (r *PutPolicy) GeoMatch(geomatch *types.EnrichPolicy) *PutPolicy { +func (r *PutPolicy) GeoMatch(geomatch types.EnrichPolicyVariant) *PutPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.GeoMatch = geomatch + r.req.GeoMatch = geomatch.EnrichPolicyCaster() return r } -// Match Matches enrich data to incoming documents based on a `term` query. +// Matches enrich data to incoming documents based on a `term` query. // API name: match -func (r *PutPolicy) Match(match *types.EnrichPolicy) *PutPolicy { +func (r *PutPolicy) Match(match types.EnrichPolicyVariant) *PutPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Match = match + r.req.Match = match.EnrichPolicyCaster() return r } -// Range Matches a number, date, or IP address in incoming documents to a range in the +// Matches a number, date, or IP address in incoming documents to a range in the // enrich index based on a `term` query. 
// API name: range -func (r *PutPolicy) Range(range_ *types.EnrichPolicy) *PutPolicy { +func (r *PutPolicy) Range(range_ types.EnrichPolicyVariant) *PutPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Range = range_ + r.req.Range = range_.EnrichPolicyCaster() return r } diff --git a/typedapi/enrich/putpolicy/request.go b/typedapi/enrich/putpolicy/request.go index 7e77a4217d..b3e90eb59d 100644 --- a/typedapi/enrich/putpolicy/request.go +++ b/typedapi/enrich/putpolicy/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putpolicy @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/put_policy/PutEnrichPolicyRequest.ts#L24-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/put_policy/PutEnrichPolicyRequest.ts#L25-L67 type Request struct { // GeoMatch Matches enrich data to incoming documents based on a `geo_shape` query. diff --git a/typedapi/enrich/putpolicy/response.go b/typedapi/enrich/putpolicy/response.go index a2efa39e76..7435d5f053 100644 --- a/typedapi/enrich/putpolicy/response.go +++ b/typedapi/enrich/putpolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putpolicy // Response holds the response body struct for the package putpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/put_policy/PutEnrichPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/put_policy/PutEnrichPolicyResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/enrich/stats/response.go b/typedapi/enrich/stats/response.go index 39f98b3285..20f4218553 100644 --- a/typedapi/enrich/stats/response.go +++ b/typedapi/enrich/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/stats/EnrichStatsResponse.ts#L22-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/stats/EnrichStatsResponse.ts#L22-L39 type Response struct { // CacheStats Objects containing information about the enrich cache stats on each ingest diff --git a/typedapi/enrich/stats/stats.go b/typedapi/enrich/stats/stats.go index 8f151671fa..3ff6666a99 100644 --- a/typedapi/enrich/stats/stats.go +++ b/typedapi/enrich/stats/stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get enrich stats. // Returns enrich coordinator statistics and information about enrich policies @@ -280,6 +280,14 @@ func (r *Stats) Header(key, value string) *Stats { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Stats) MasterTimeout(duration string) *Stats { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace diff --git a/typedapi/eql/delete/delete.go b/typedapi/eql/delete/delete.go index 5883882669..00ebdb1be7 100644 --- a/typedapi/eql/delete/delete.go +++ b/typedapi/eql/delete/delete.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes an async EQL search or a stored synchronous EQL search. +// Delete an async EQL search. +// Delete an async EQL search or a stored synchronous EQL search. // The API also deletes results for the search. package delete @@ -77,10 +78,11 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } } -// Deletes an async EQL search or a stored synchronous EQL search. +// Delete an async EQL search. +// Delete an async EQL search or a stored synchronous EQL search. // The API also deletes results for the search. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html +// https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-eql-delete func New(tp elastictransport.Interface) *Delete { r := &Delete{ transport: tp, diff --git a/typedapi/eql/delete/response.go b/typedapi/eql/delete/response.go index 0a6648e998..46caf683d5 100644 --- a/typedapi/eql/delete/response.go +++ b/typedapi/eql/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/eql/delete/EqlDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/eql/delete/EqlDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/eql/get/get.go b/typedapi/eql/get/get.go index 88ebae449a..7a16c5615e 100644 --- a/typedapi/eql/get/get.go +++ b/typedapi/eql/get/get.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the current status and available results for an async EQL search or a +// Get async EQL search results. +// Get the current status and available results for an async EQL search or a // stored synchronous EQL search. package get @@ -77,7 +78,8 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } } -// Returns the current status and available results for an async EQL search or a +// Get async EQL search results. +// Get the current status and available results for an async EQL search or a // stored synchronous EQL search. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-eql-search-api.html diff --git a/typedapi/eql/get/response.go b/typedapi/eql/get/response.go index bfc08c1828..fbfee0fe1c 100644 --- a/typedapi/eql/get/response.go +++ b/typedapi/eql/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/eql/get/EqlGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/eql/get/EqlGetResponse.ts#L22-L24 type Response struct { // Hits Contains matching events and sequences. Also contains related metadata. @@ -37,6 +37,9 @@ type Response struct { IsPartial *bool `json:"is_partial,omitempty"` // IsRunning If true, the search request is still executing. IsRunning *bool `json:"is_running,omitempty"` + // ShardFailures Contains information about shard failures (if any), in case + // allow_partial_search_results=true + ShardFailures []types.ShardFailure `json:"shard_failures,omitempty"` // TimedOut If true, the request timed out before completion. TimedOut *bool `json:"timed_out,omitempty"` // Took Milliseconds it took Elasticsearch to execute the request. diff --git a/typedapi/eql/getstatus/get_status.go b/typedapi/eql/getstatus/get_status.go index 0895d44bc6..8801316593 100644 --- a/typedapi/eql/getstatus/get_status.go +++ b/typedapi/eql/getstatus/get_status.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the current status for an async EQL search or a stored synchronous -// EQL search without returning results. +// Get the async EQL status. +// Get the current status for an async EQL search or a stored synchronous EQL +// search without returning results. package getstatus import ( @@ -77,8 +78,9 @@ func NewGetStatusFunc(tp elastictransport.Interface) NewGetStatus { } } -// Returns the current status for an async EQL search or a stored synchronous -// EQL search without returning results. +// Get the async EQL status. +// Get the current status for an async EQL search or a stored synchronous EQL +// search without returning results. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-eql-status-api.html func New(tp elastictransport.Interface) *GetStatus { diff --git a/typedapi/eql/getstatus/response.go b/typedapi/eql/getstatus/response.go index ac90878a12..86ff9505fe 100644 --- a/typedapi/eql/getstatus/response.go +++ b/typedapi/eql/getstatus/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getstatus // Response holds the response body struct for the package getstatus // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/eql/get_status/EqlGetStatusResponse.ts#L24-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/eql/get_status/EqlGetStatusResponse.ts#L24-L51 type Response struct { // CompletionStatus For a completed search shows the http status code of the completed search. diff --git a/typedapi/eql/search/request.go b/typedapi/eql/search/request.go index 9d09714e6e..749e54fc0b 100644 --- a/typedapi/eql/search/request.go +++ b/typedapi/eql/search/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package search @@ -34,9 +34,23 @@ import ( // Request holds the request body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/eql/search/EqlSearchRequest.ts#L28-L118 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/eql/search/EqlSearchRequest.ts#L28-L161 type Request struct { - CaseSensitive *bool `json:"case_sensitive,omitempty"` + + // AllowPartialSearchResults Allow query execution also in case of shard failures. + // If true, the query will keep running and will return results based on the + // available shards. 
+ // For sequences, the behavior can be further refined using + // allow_partial_sequence_results + AllowPartialSearchResults *bool `json:"allow_partial_search_results,omitempty"` + // AllowPartialSequenceResults This flag applies only to sequences and has effect only if + // allow_partial_search_results=true. + // If true, the sequence query will return results based on the available + // shards, ignoring the others. + // If false, the sequence query will return successfully, but will always have + // empty results. + AllowPartialSequenceResults *bool `json:"allow_partial_sequence_results,omitempty"` + CaseSensitive *bool `json:"case_sensitive,omitempty"` // EventCategoryField Field containing the event classification, such as process, file, or network. EventCategoryField *string `json:"event_category_field,omitempty"` // FetchSize Maximum number of events to search at a time for sequence queries. @@ -49,6 +63,12 @@ type Request struct { Filter []types.Query `json:"filter,omitempty"` KeepAlive types.Duration `json:"keep_alive,omitempty"` KeepOnCompletion *bool `json:"keep_on_completion,omitempty"` + // MaxSamplesPerKey By default, the response of a sample query contains up to `10` samples, with + // one sample per unique set of join keys. Use the `size` + // parameter to get a smaller or larger set of samples. To retrieve more than + // one sample per set of join keys, use the + // `max_samples_per_key` parameter. Pipes are not supported for sample queries. + MaxSamplesPerKey *int `json:"max_samples_per_key,omitempty"` // Query EQL query you wish to run. 
Query string `json:"query"` ResultPosition *resultposition.ResultPosition `json:"result_position,omitempty"` @@ -96,6 +116,34 @@ func (s *Request) UnmarshalJSON(data []byte) error { switch t { + case "allow_partial_search_results": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowPartialSearchResults", err) + } + s.AllowPartialSearchResults = &value + case bool: + s.AllowPartialSearchResults = &v + } + + case "allow_partial_sequence_results": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowPartialSequenceResults", err) + } + s.AllowPartialSequenceResults = &value + case bool: + s.AllowPartialSequenceResults = &v + } + case "case_sensitive": var tmp any dec.Decode(&tmp) @@ -171,6 +219,22 @@ func (s *Request) UnmarshalJSON(data []byte) error { s.KeepOnCompletion = &v } + case "max_samples_per_key": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxSamplesPerKey", err) + } + s.MaxSamplesPerKey = &value + case float64: + f := int(v) + s.MaxSamplesPerKey = &f + } + case "query": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { diff --git a/typedapi/eql/search/response.go b/typedapi/eql/search/response.go index 62a849fb97..40f28ad3c9 100644 --- a/typedapi/eql/search/response.go +++ b/typedapi/eql/search/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package search @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/eql/search/EqlSearchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/eql/search/EqlSearchResponse.ts#L22-L24 type Response struct { // Hits Contains matching events and sequences. Also contains related metadata. @@ -37,6 +37,9 @@ type Response struct { IsPartial *bool `json:"is_partial,omitempty"` // IsRunning If true, the search request is still executing. IsRunning *bool `json:"is_running,omitempty"` + // ShardFailures Contains information about shard failures (if any), in case + // allow_partial_search_results=true + ShardFailures []types.ShardFailure `json:"shard_failures,omitempty"` // TimedOut If true, the request timed out before completion. TimedOut *bool `json:"timed_out,omitempty"` // Took Milliseconds it took Elasticsearch to execute the request. diff --git a/typedapi/eql/search/search.go b/typedapi/eql/search/search.go index ee52fcc9c0..0ce167a8fa 100644 --- a/typedapi/eql/search/search.go +++ b/typedapi/eql/search/search.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns results matching a query expressed in Event Query Language (EQL) +// Get EQL search results. +// Returns search results for an Event Query Language (EQL) query. 
+// EQL assumes each document in a data stream or index corresponds to an event. package search import ( @@ -83,7 +85,9 @@ func NewSearchFunc(tp elastictransport.Interface) NewSearch { } } -// Returns results matching a query expressed in Event Query Language (EQL) +// Get EQL search results. +// Returns search results for an Event Query Language (EQL) query. +// EQL assumes each document in a data stream or index corresponds to an event. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html func New(tp elastictransport.Interface) *Search { @@ -93,8 +97,6 @@ func New(tp elastictransport.Interface) *Search { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -387,65 +389,159 @@ func (r *Search) Pretty(pretty bool) *Search { return r } +// Allow query execution also in case of shard failures. +// If true, the query will keep running and will return results based on the +// available shards. +// For sequences, the behavior can be further refined using +// allow_partial_sequence_results +// API name: allow_partial_search_results +func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowPartialSearchResults = &allowpartialsearchresults + + return r +} + +// This flag applies only to sequences and has effect only if +// allow_partial_search_results=true. +// If true, the sequence query will return results based on the available +// shards, ignoring the others. +// If false, the sequence query will return successfully, but will always have +// empty results. 
+// API name: allow_partial_sequence_results +func (r *Search) AllowPartialSequenceResults(allowpartialsequenceresults bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowPartialSequenceResults = &allowpartialsequenceresults + + return r +} + // API name: case_sensitive func (r *Search) CaseSensitive(casesensitive bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.CaseSensitive = &casesensitive return r } -// EventCategoryField Field containing the event classification, such as process, file, or network. +// Field containing the event classification, such as process, file, or network. // API name: event_category_field func (r *Search) EventCategoryField(field string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.EventCategoryField = &field return r } -// FetchSize Maximum number of events to search at a time for sequence queries. +// Maximum number of events to search at a time for sequence queries. // API name: fetch_size func (r *Search) FetchSize(fetchsize uint) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.FetchSize = &fetchsize return r } -// Fields Array of wildcard (*) patterns. The response returns values for field names +// Array of wildcard (*) patterns. The response returns values for field names // matching these patterns in the fields property of each hit. 
// API name: fields -func (r *Search) Fields(fields ...types.FieldAndFormat) *Search { - r.req.Fields = fields +func (r *Search) Fields(fields ...types.FieldAndFormatVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Fields = make([]types.FieldAndFormat, len(fields)) + for i, v := range fields { + r.req.Fields[i] = *v.FieldAndFormatCaster() + } return r } -// Filter Query, written in Query DSL, used to filter the events on which the EQL query +// Query, written in Query DSL, used to filter the events on which the EQL query // runs. // API name: filter -func (r *Search) Filter(filters ...types.Query) *Search { - r.req.Filter = filters +func (r *Search) Filter(filters ...types.QueryVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Filter = make([]types.Query, len(filters)) + for i, v := range filters { + r.req.Filter[i] = *v.QueryCaster() + } return r } // API name: keep_alive -func (r *Search) KeepAlive(duration types.Duration) *Search { - r.req.KeepAlive = duration +func (r *Search) KeepAlive(duration types.DurationVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.KeepAlive = *duration.DurationCaster() return r } // API name: keep_on_completion func (r *Search) KeepOnCompletion(keeponcompletion bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.KeepOnCompletion = &keeponcompletion return r } -// Query EQL query you wish to run. +// By default, the response of a sample query contains up to `10` samples, with +// one sample per unique set of join keys. Use the `size` +// parameter to get a smaller or larger set of samples. To retrieve more than +// one sample per set of join keys, use the +// `max_samples_per_key` parameter. 
Pipes are not supported for sample queries. +// API name: max_samples_per_key +func (r *Search) MaxSamplesPerKey(maxsamplesperkey int) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxSamplesPerKey = &maxsamplesperkey + + return r +} + +// EQL query you wish to run. // API name: query func (r *Search) Query(query string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Query = query @@ -454,47 +550,74 @@ func (r *Search) Query(query string) *Search { // API name: result_position func (r *Search) ResultPosition(resultposition resultposition.ResultPosition) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ResultPosition = &resultposition - return r } // API name: runtime_mappings -func (r *Search) RuntimeMappings(runtimefields types.RuntimeFields) *Search { - r.req.RuntimeMappings = runtimefields +func (r *Search) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// Size For basic queries, the maximum number of matching events to return. Defaults +// For basic queries, the maximum number of matching events to return. 
Defaults // to 10 // API name: size func (r *Search) Size(size uint) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Size = &size return r } -// TiebreakerField Field used to sort hits with the same timestamp in ascending order +// Field used to sort hits with the same timestamp in ascending order // API name: tiebreaker_field func (r *Search) TiebreakerField(field string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TiebreakerField = &field return r } -// TimestampField Field containing event timestamp. Default "@timestamp" +// Field containing event timestamp. Default "@timestamp" // API name: timestamp_field func (r *Search) TimestampField(field string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TimestampField = &field return r } // API name: wait_for_completion_timeout -func (r *Search) WaitForCompletionTimeout(duration types.Duration) *Search { - r.req.WaitForCompletionTimeout = duration +func (r *Search) WaitForCompletionTimeout(duration types.DurationVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.WaitForCompletionTimeout = *duration.DurationCaster() return r } diff --git a/typedapi/esql/asyncquery/async_query.go b/typedapi/esql/asyncquery/async_query.go index fd0dc9a824..cfc2d5321a 100644 --- a/typedapi/esql/asyncquery/async_query.go +++ b/typedapi/esql/asyncquery/async_query.go @@ -16,21 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Executes an ESQL request asynchronously +// Run an async ES|QL query. +// Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its +// progress, and retrieve results when they become available. +// +// The API accepts the same parameters and request body as the synchronous query +// API, along with additional async related properties. package asyncquery import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/esqlformat" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -45,6 +55,10 @@ type AsyncQuery struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -65,7 +79,12 @@ func NewAsyncQueryFunc(tp elastictransport.Interface) NewAsyncQuery { } } -// Executes an ESQL request asynchronously +// Run an async ES|QL query. +// Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its +// progress, and retrieve results when they become available. +// +// The API accepts the same parameters and request body as the synchronous query +// API, along with additional async related properties. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-api.html func New(tp elastictransport.Interface) *AsyncQuery { @@ -73,6 +92,8 @@ func New(tp elastictransport.Interface) *AsyncQuery { transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -84,6 +105,21 @@ func New(tp elastictransport.Interface) *AsyncQuery { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *AsyncQuery) Raw(raw io.Reader) *AsyncQuery { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *AsyncQuery) Request(req *Request) *AsyncQuery { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. func (r *AsyncQuery) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -93,6 +129,31 @@ func (r *AsyncQuery) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for AsyncQuery: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -180,13 +241,7 @@ func (r AsyncQuery) Perform(providedCtx context.Context) (*http.Response, error) } // Do runs the request through the transport, handle the response and returns a asyncquery.Response -func (r AsyncQuery) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. 
-// This only exists for endpoints without a request payload and allows for quick control flow. -func (r AsyncQuery) IsSuccess(providedCtx context.Context) (bool, error) { +func (r AsyncQuery) Do(providedCtx context.Context) (Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -197,30 +252,46 @@ func (r AsyncQuery) IsSuccess(providedCtx context.Context) (bool, error) { ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the AsyncQuery query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err } - return false, nil + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the AsyncQuery headers map. 
@@ -229,3 +300,232 @@ func (r *AsyncQuery) Header(key, value string) *AsyncQuery { return r } + +// Delimiter The character to use between values within a CSV row. +// It is valid only for the CSV format. +// API name: delimiter +func (r *AsyncQuery) Delimiter(delimiter string) *AsyncQuery { + r.values.Set("delimiter", delimiter) + + return r +} + +// DropNullColumns Indicates whether columns that are entirely `null` will be removed from the +// `columns` and `values` portion of the results. +// If `true`, the response will include an extra section under the name +// `all_columns` which has the name of all the columns. +// API name: drop_null_columns +func (r *AsyncQuery) DropNullColumns(dropnullcolumns bool) *AsyncQuery { + r.values.Set("drop_null_columns", strconv.FormatBool(dropnullcolumns)) + + return r +} + +// Format A short version of the Accept header, for example `json` or `yaml`. +// API name: format +func (r *AsyncQuery) Format(format esqlformat.EsqlFormat) *AsyncQuery { + r.values.Set("format", format.String()) + + return r +} + +// KeepAlive The period for which the query and its results are stored in the cluster. +// The default period is five days. +// When this period expires, the query and its results are deleted, even if the +// query is still ongoing. +// If the `keep_on_completion` parameter is false, Elasticsearch only stores +// async queries that do not complete within the period set by the +// `wait_for_completion_timeout` parameter, regardless of this value. +// API name: keep_alive +func (r *AsyncQuery) KeepAlive(duration string) *AsyncQuery { + r.values.Set("keep_alive", duration) + + return r +} + +// KeepOnCompletion Indicates whether the query and its results are stored in the cluster. +// If false, the query and its results are stored in the cluster only if the +// request does not complete during the period set by the +// `wait_for_completion_timeout` parameter. 
+// API name: keep_on_completion
+func (r *AsyncQuery) KeepOnCompletion(keeponcompletion bool) *AsyncQuery {
+	r.values.Set("keep_on_completion", strconv.FormatBool(keeponcompletion))
+
+	return r
+}
+
+// WaitForCompletionTimeout The period to wait for the request to finish.
+// By default, the request waits for 1 second for the query results.
+// If the query completes during this period, results are returned.
+// Otherwise, a query ID is returned that can later be used to retrieve the
+// results.
+// API name: wait_for_completion_timeout
+func (r *AsyncQuery) WaitForCompletionTimeout(duration string) *AsyncQuery {
+	r.values.Set("wait_for_completion_timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *AsyncQuery) ErrorTrace(errortrace bool) *AsyncQuery {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *AsyncQuery) FilterPath(filterpaths ...string) *AsyncQuery {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *AsyncQuery) Human(human bool) *AsyncQuery {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty +func (r *AsyncQuery) Pretty(pretty bool) *AsyncQuery { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// By default, ES|QL returns results as rows. For example, FROM returns each +// individual document as one row. For the JSON, YAML, CBOR and smile formats, +// ES|QL can return the results in a columnar fashion where one row represents +// all the values of a certain column in the results. +// API name: columnar +func (r *AsyncQuery) Columnar(columnar bool) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Columnar = &columnar + + return r +} + +// Specify a Query DSL query in the filter parameter to filter the set of +// documents that an ES|QL query runs on. +// API name: filter +func (r *AsyncQuery) Filter(filter types.QueryVariant) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Filter = filter.QueryCaster() + + return r +} + +// When set to `true` and performing a cross-cluster query, the response will +// include an extra `_clusters` +// object with information about the clusters that participated in the search +// along with info such as shards +// count. +// API name: include_ccs_metadata +func (r *AsyncQuery) IncludeCcsMetadata(includeccsmetadata bool) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IncludeCcsMetadata = &includeccsmetadata + + return r +} + +// API name: locale +func (r *AsyncQuery) Locale(locale string) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Locale = &locale + + return r +} + +// To avoid any attempts of hacking or code injection, extract the values in a +// separate list of parameters. Use question mark placeholders (?) 
in the query +// string for each of the parameters. +// API name: params +func (r *AsyncQuery) Params(params ...types.FieldValueVariant) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range params { + + r.req.Params = append(r.req.Params, *v.FieldValueCaster()) + + } + return r +} + +// If provided and `true` the response will include an extra `profile` object +// with information on how the query was executed. This information is for human +// debugging +// and its format can change at any time but it can give some insight into the +// performance +// of each part of the query. +// API name: profile +func (r *AsyncQuery) Profile(profile bool) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Profile = &profile + + return r +} + +// The ES|QL query API accepts an ES|QL query string in the query parameter, +// runs it, and returns the results. +// API name: query +func (r *AsyncQuery) Query(query string) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query + + return r +} + +// Tables to use with the LOOKUP operation. The top level key is the table +// name and the next level key is the column name. +// API name: tables +func (r *AsyncQuery) Tables(tables map[string]map[string]types.TableValuesContainer) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Tables = tables + return r +} diff --git a/typedapi/esql/asyncquery/request.go b/typedapi/esql/asyncquery/request.go new file mode 100644 index 0000000000..a21aa72e96 --- /dev/null +++ b/typedapi/esql/asyncquery/request.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package asyncquery + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package asyncquery +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/esql/async_query/AsyncQueryRequest.ts#L28-L125 +type Request struct { + + // Columnar By default, ES|QL returns results as rows. For example, FROM returns each + // individual document as one row. For the JSON, YAML, CBOR and smile formats, + // ES|QL can return the results in a columnar fashion where one row represents + // all the values of a certain column in the results. + Columnar *bool `json:"columnar,omitempty"` + // Filter Specify a Query DSL query in the filter parameter to filter the set of + // documents that an ES|QL query runs on. 
+ Filter *types.Query `json:"filter,omitempty"` + // IncludeCcsMetadata When set to `true` and performing a cross-cluster query, the response will + // include an extra `_clusters` + // object with information about the clusters that participated in the search + // along with info such as shards + // count. + IncludeCcsMetadata *bool `json:"include_ccs_metadata,omitempty"` + Locale *string `json:"locale,omitempty"` + // Params To avoid any attempts of hacking or code injection, extract the values in a + // separate list of parameters. Use question mark placeholders (?) in the query + // string for each of the parameters. + Params []types.FieldValue `json:"params,omitempty"` + // Profile If provided and `true` the response will include an extra `profile` object + // with information on how the query was executed. This information is for human + // debugging + // and its format can change at any time but it can give some insight into the + // performance + // of each part of the query. + Profile *bool `json:"profile,omitempty"` + // Query The ES|QL query API accepts an ES|QL query string in the query parameter, + // runs it, and returns the results. + Query string `json:"query"` + // Tables Tables to use with the LOOKUP operation. The top level key is the table + // name and the next level key is the column name. 
+ Tables map[string]map[string]types.TableValuesContainer `json:"tables,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Tables: make(map[string]map[string]types.TableValuesContainer, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Asyncquery request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/esql/asyncquery/response.go b/typedapi/esql/asyncquery/response.go new file mode 100644 index 0000000000..160d4df3f1 --- /dev/null +++ b/typedapi/esql/asyncquery/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package asyncquery + +// Response holds the response body struct for the package asyncquery +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/esql/async_query/AsyncQueryResponse.ts#L22-L24 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/esql/asyncquerydelete/async_query_delete.go b/typedapi/esql/asyncquerydelete/async_query_delete.go new file mode 100644 index 0000000000..bb49d0fde7 --- /dev/null +++ b/typedapi/esql/asyncquerydelete/async_query_delete.go @@ -0,0 +1,364 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Delete an async ES|QL query. +// If the query is still running, it is cancelled. +// Otherwise, the stored results are deleted. 
+// +// If the Elasticsearch security features are enabled, only the following users +// can use this API to delete a query: +// +// * The authenticated user that submitted the original query request +// * Users with the `cancel_task` cluster privilege +package asyncquerydelete + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type AsyncQueryDelete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAsyncQueryDelete type alias for index. +type NewAsyncQueryDelete func(id string) *AsyncQueryDelete + +// NewAsyncQueryDeleteFunc returns a new instance of AsyncQueryDelete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAsyncQueryDeleteFunc(tp elastictransport.Interface) NewAsyncQueryDelete { + return func(id string) *AsyncQueryDelete { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete an async ES|QL query. +// If the query is still running, it is cancelled. +// Otherwise, the stored results are deleted. 
+// +// If the Elasticsearch security features are enabled, only the following users +// can use this API to delete a query: +// +// * The authenticated user that submitted the original query request +// * Users with the `cancel_task` cluster privilege +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-delete-api.html +func New(tp elastictransport.Interface) *AsyncQueryDelete { + r := &AsyncQueryDelete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *AsyncQueryDelete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through 
the provided transport and returns an http.Response. +func (r AsyncQueryDelete) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "esql.async_query_delete") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_delete") + if reader := instrument.RecordRequestBody(ctx, "esql.async_query_delete", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_delete") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the AsyncQueryDelete query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a asyncquerydelete.Response +func (r AsyncQueryDelete) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r AsyncQueryDelete) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the AsyncQueryDelete query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the AsyncQueryDelete headers map. +func (r *AsyncQueryDelete) Header(key, value string) *AsyncQueryDelete { + r.headers.Set(key, value) + + return r +} + +// Id The unique identifier of the query. +// A query ID is provided in the ES|QL async query API response for a query that +// does not complete in the designated time. +// A query ID is also provided when the request was submitted with the +// `keep_on_completion` parameter set to `true`. +// API Name: id +func (r *AsyncQueryDelete) _id(id string) *AsyncQueryDelete { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AsyncQueryDelete) ErrorTrace(errortrace bool) *AsyncQueryDelete { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *AsyncQueryDelete) FilterPath(filterpaths ...string) *AsyncQueryDelete { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AsyncQueryDelete) Human(human bool) *AsyncQueryDelete { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *AsyncQueryDelete) Pretty(pretty bool) *AsyncQueryDelete { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/esql/asyncquerydelete/response.go b/typedapi/esql/asyncquerydelete/response.go new file mode 100644 index 0000000000..2207062389 --- /dev/null +++ b/typedapi/esql/asyncquerydelete/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package asyncquerydelete + +// Response holds the response body struct for the package asyncquerydelete +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/esql/async_query_delete/AsyncQueryDeleteResponse.ts#L22-L24 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/esql/asyncqueryget/async_query_get.go b/typedapi/esql/asyncqueryget/async_query_get.go new file mode 100644 index 0000000000..1da9760b68 --- /dev/null +++ b/typedapi/esql/asyncqueryget/async_query_get.go @@ -0,0 +1,390 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Get async ES|QL query results. +// Get the current status and available results or stored results for an ES|QL +// asynchronous query. +// If the Elasticsearch security features are enabled, only the user who first +// submitted the ES|QL query can retrieve the results using this API. +package asyncqueryget + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type AsyncQueryGet struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAsyncQueryGet type alias for index. +type NewAsyncQueryGet func(id string) *AsyncQueryGet + +// NewAsyncQueryGetFunc returns a new instance of AsyncQueryGet with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAsyncQueryGetFunc(tp elastictransport.Interface) NewAsyncQueryGet { + return func(id string) *AsyncQueryGet { + n := New(tp) + + n._id(id) + + return n + } +} + +// Get async ES|QL query results. +// Get the current status and available results or stored results for an ES|QL +// asynchronous query. +// If the Elasticsearch security features are enabled, only the user who first +// submitted the ES|QL query can retrieve the results using this API. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-get-api.html +func New(tp elastictransport.Interface) *AsyncQueryGet { + r := &AsyncQueryGet{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *AsyncQueryGet) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r AsyncQueryGet) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "esql.async_query_get") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_get") + if reader := instrument.RecordRequestBody(ctx, "esql.async_query_get", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_get") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the AsyncQueryGet query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a asyncqueryget.Response +func (r AsyncQueryGet) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if 
res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r AsyncQueryGet) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the AsyncQueryGet query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the AsyncQueryGet headers map. 
+func (r *AsyncQueryGet) Header(key, value string) *AsyncQueryGet { + r.headers.Set(key, value) + + return r +} + +// Id The unique identifier of the query. +// A query ID is provided in the ES|QL async query API response for a query that +// does not complete in the designated time. +// A query ID is also provided when the request was submitted with the +// `keep_on_completion` parameter set to `true`. +// API Name: id +func (r *AsyncQueryGet) _id(id string) *AsyncQueryGet { + r.paramSet |= idMask + r.id = id + + return r +} + +// DropNullColumns Indicates whether columns that are entirely `null` will be removed from the +// `columns` and `values` portion of the results. +// If `true`, the response will include an extra section under the name +// `all_columns` which has the name of all the columns. +// API name: drop_null_columns +func (r *AsyncQueryGet) DropNullColumns(dropnullcolumns bool) *AsyncQueryGet { + r.values.Set("drop_null_columns", strconv.FormatBool(dropnullcolumns)) + + return r +} + +// KeepAlive The period for which the query and its results are stored in the cluster. +// When this period expires, the query and its results are deleted, even if the +// query is still ongoing. +// API name: keep_alive +func (r *AsyncQueryGet) KeepAlive(duration string) *AsyncQueryGet { + r.values.Set("keep_alive", duration) + + return r +} + +// WaitForCompletionTimeout The period to wait for the request to finish. +// By default, the request waits for complete query results. +// If the request completes during the period specified in this parameter, +// complete query results are returned. +// Otherwise, the response returns an `is_running` value of `true` and no +// results. 
+// API name: wait_for_completion_timeout +func (r *AsyncQueryGet) WaitForCompletionTimeout(duration string) *AsyncQueryGet { + r.values.Set("wait_for_completion_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AsyncQueryGet) ErrorTrace(errortrace bool) *AsyncQueryGet { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *AsyncQueryGet) FilterPath(filterpaths ...string) *AsyncQueryGet { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AsyncQueryGet) Human(human bool) *AsyncQueryGet { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *AsyncQueryGet) Pretty(pretty bool) *AsyncQueryGet { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/esql/asyncqueryget/response.go b/typedapi/esql/asyncqueryget/response.go new file mode 100644 index 0000000000..672345e2c6 --- /dev/null +++ b/typedapi/esql/asyncqueryget/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package asyncqueryget + +// Response holds the response body struct for the package asyncqueryget +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/esql/async_query_get/AsyncQueryGetResponse.ts#L22-L24 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/esql/asyncquerystop/async_query_stop.go b/typedapi/esql/asyncquerystop/async_query_stop.go new file mode 100644 index 0000000000..6ea0c7edb8 --- /dev/null +++ b/typedapi/esql/asyncquerystop/async_query_stop.go @@ -0,0 +1,369 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Stop async ES|QL query. +// +// This API interrupts the query execution and returns the results so far. +// If the Elasticsearch security features are enabled, only the user who first +// submitted the ES|QL query can stop it. +package asyncquerystop + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type AsyncQueryStop struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAsyncQueryStop type alias for index. +type NewAsyncQueryStop func(id string) *AsyncQueryStop + +// NewAsyncQueryStopFunc returns a new instance of AsyncQueryStop with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewAsyncQueryStopFunc(tp elastictransport.Interface) NewAsyncQueryStop { + return func(id string) *AsyncQueryStop { + n := New(tp) + + n._id(id) + + return n + } +} + +// Stop async ES|QL query. +// +// This API interrupts the query execution and returns the results so far. +// If the Elasticsearch security features are enabled, only the user who first +// submitted the ES|QL query can stop it. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-stop-api.html +func New(tp elastictransport.Interface) *AsyncQueryStop { + r := &AsyncQueryStop{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *AsyncQueryStop) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + path.WriteString("stop") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", 
"application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r AsyncQueryStop) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "esql.async_query_stop") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_stop") + if reader := instrument.RecordRequestBody(ctx, "esql.async_query_stop", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_stop") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the AsyncQueryStop query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a asyncquerystop.Response +func (r AsyncQueryStop) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = 
providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r AsyncQueryStop) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the AsyncQueryStop query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the AsyncQueryStop headers map. +func (r *AsyncQueryStop) Header(key, value string) *AsyncQueryStop { + r.headers.Set(key, value) + + return r +} + +// Id The unique identifier of the query. +// A query ID is provided in the ES|QL async query API response for a query that +// does not complete in the designated time. +// A query ID is also provided when the request was submitted with the +// `keep_on_completion` parameter set to `true`. +// API Name: id +func (r *AsyncQueryStop) _id(id string) *AsyncQueryStop { + r.paramSet |= idMask + r.id = id + + return r +} + +// DropNullColumns Indicates whether columns that are entirely `null` will be removed from the +// `columns` and `values` portion of the results. +// If `true`, the response will include an extra section under the name +// `all_columns` which has the name of all the columns. 
+// API name: drop_null_columns +func (r *AsyncQueryStop) DropNullColumns(dropnullcolumns bool) *AsyncQueryStop { + r.values.Set("drop_null_columns", strconv.FormatBool(dropnullcolumns)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AsyncQueryStop) ErrorTrace(errortrace bool) *AsyncQueryStop { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *AsyncQueryStop) FilterPath(filterpaths ...string) *AsyncQueryStop { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AsyncQueryStop) Human(human bool) *AsyncQueryStop { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *AsyncQueryStop) Pretty(pretty bool) *AsyncQueryStop { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/esql/asyncquerystop/response.go b/typedapi/esql/asyncquerystop/response.go new file mode 100644 index 0000000000..77abdd6d82 --- /dev/null +++ b/typedapi/esql/asyncquerystop/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package asyncquerystop + +// Response holds the response body struct for the package asyncquerystop +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/esql/async_query_stop/AsyncQueryStopResponse.ts#L22-L24 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/esql/query/helpers.go b/typedapi/esql/query/helpers.go index bde2cb2c4e..47db6ae65d 100644 --- a/typedapi/esql/query/helpers.go +++ b/typedapi/esql/query/helpers.go @@ -24,6 +24,8 @@ import ( "errors" "fmt" "io" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/esqlformat" ) type metadata struct { @@ -41,7 +43,7 @@ type esqlResponse struct { func Helper[T any](ctx context.Context, esqlQuery *Query) ([]T, error) { response, err := esqlQuery. Columnar(false). - Format("json"). + Format(esqlformat.Json). Header("x-elastic-client-meta", "h=qo"). 
Do(ctx) if err != nil { @@ -142,7 +144,7 @@ func (d iterator[T]) Next() (*T, error) { func NewIteratorHelper[T any](ctx context.Context, query *Query) (EsqlIterator[T], error) { response, err := query. Columnar(false). - Format("json"). + Format(esqlformat.Json). Header("x-elastic-client-meta", "h=qo"). Perform(ctx) if err != nil { diff --git a/typedapi/esql/query/query.go b/typedapi/esql/query/query.go index ea4a95d2a9..76e752788b 100644 --- a/typedapi/esql/query/query.go +++ b/typedapi/esql/query/query.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Executes an ES|QL request +// Run an ES|QL query. +// Get search results for an ES|QL (Elasticsearch query language) query. package query import ( @@ -35,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/esqlformat" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -73,7 +75,8 @@ func NewQueryFunc(tp elastictransport.Interface) NewQuery { } } -// Executes an ES|QL request +// Run an ES|QL query. +// Get search results for an ES|QL (Elasticsearch query language) query. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-rest.html func New(tp elastictransport.Interface) *Query { @@ -83,8 +86,6 @@ func New(tp elastictransport.Interface) *Query { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -292,8 +293,8 @@ func (r *Query) Header(key, value string) *Query { // Format A short version of the Accept header, e.g. json, yaml. 
// API name: format -func (r *Query) Format(format string) *Query { - r.values.Set("format", format) +func (r *Query) Format(format esqlformat.EsqlFormat) *Query { + r.values.Set("format", format.String()) return r } @@ -362,46 +363,83 @@ func (r *Query) Pretty(pretty bool) *Query { return r } -// Columnar By default, ES|QL returns results as rows. For example, FROM returns each +// By default, ES|QL returns results as rows. For example, FROM returns each // individual document as one row. For the JSON, YAML, CBOR and smile formats, // ES|QL can return the results in a columnar fashion where one row represents // all the values of a certain column in the results. // API name: columnar func (r *Query) Columnar(columnar bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Columnar = &columnar return r } -// Filter Specify a Query DSL query in the filter parameter to filter the set of +// Specify a Query DSL query in the filter parameter to filter the set of // documents that an ES|QL query runs on. // API name: filter -func (r *Query) Filter(filter *types.Query) *Query { +func (r *Query) Filter(filter types.QueryVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Filter = filter.QueryCaster() + + return r +} + +// When set to `true` and performing a cross-cluster query, the response will +// include an extra `_clusters` +// object with information about the clusters that participated in the search +// along with info such as shards +// count. 
+// API name: include_ccs_metadata +func (r *Query) IncludeCcsMetadata(includeccsmetadata bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Filter = filter + r.req.IncludeCcsMetadata = &includeccsmetadata return r } // API name: locale func (r *Query) Locale(locale string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Locale = &locale return r } -// Params To avoid any attempts of hacking or code injection, extract the values in a +// To avoid any attempts of hacking or code injection, extract the values in a // separate list of parameters. Use question mark placeholders (?) in the query // string for each of the parameters. // API name: params -func (r *Query) Params(params ...types.FieldValue) *Query { - r.req.Params = params +func (r *Query) Params(params ...types.FieldValueVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range params { + + r.req.Params = append(r.req.Params, *v.FieldValueCaster()) + } return r } -// Profile If provided and `true` the response will include an extra `profile` object +// If provided and `true` the response will include an extra `profile` object // with information on how the query was executed. This information is for human // debugging // and its format can change at any time but it can give some insight into the @@ -409,27 +447,38 @@ func (r *Query) Params(params ...types.FieldValue) *Query { // of each part of the query. 
// API name: profile func (r *Query) Profile(profile bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Profile = &profile return r } -// Query The ES|QL query API accepts an ES|QL query string in the query parameter, +// The ES|QL query API accepts an ES|QL query string in the query parameter, // runs it, and returns the results. // API name: query func (r *Query) Query(query string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Query = query return r } -// Tables Tables to use with the LOOKUP operation. The top level key is the table +// Tables to use with the LOOKUP operation. The top level key is the table // name and the next level key is the column name. // API name: tables func (r *Query) Tables(tables map[string]map[string]types.TableValuesContainer) *Query { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Tables = tables - return r } diff --git a/typedapi/esql/query/request.go b/typedapi/esql/query/request.go index 39d82f4199..27d725fd50 100644 --- a/typedapi/esql/query/request.go +++ b/typedapi/esql/query/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package query @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package query // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/esql/query/QueryRequest.ts#L26-L89 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/esql/query/QueryRequest.ts#L27-L105 type Request struct { // Columnar By default, ES|QL returns results as rows. For example, FROM returns each @@ -40,7 +40,13 @@ type Request struct { // Filter Specify a Query DSL query in the filter parameter to filter the set of // documents that an ES|QL query runs on. Filter *types.Query `json:"filter,omitempty"` - Locale *string `json:"locale,omitempty"` + // IncludeCcsMetadata When set to `true` and performing a cross-cluster query, the response will + // include an extra `_clusters` + // object with information about the clusters that participated in the search + // along with info such as shards + // count. + IncludeCcsMetadata *bool `json:"include_ccs_metadata,omitempty"` + Locale *string `json:"locale,omitempty"` // Params To avoid any attempts of hacking or code injection, extract the values in a // separate list of parameters. Use question mark placeholders (?) in the query // string for each of the parameters. diff --git a/typedapi/esql/query/response.go b/typedapi/esql/query/response.go index b3a9a1b29f..524e712d74 100644 --- a/typedapi/esql/query/response.go +++ b/typedapi/esql/query/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package query // Response holds the response body struct for the package query // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/esql/query/QueryResponse.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/esql/query/QueryResponse.ts#L22-L25 type Response = []byte diff --git a/typedapi/features/getfeatures/get_features.go b/typedapi/features/getfeatures/get_features.go index 496a114135..43e376d3b7 100644 --- a/typedapi/features/getfeatures/get_features.go +++ b/typedapi/features/getfeatures/get_features.go @@ -16,10 +16,26 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Gets a list of features which can be included in snapshots using the -// feature_states field when creating a snapshot +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Get the features. +// Get a list of features that can be included in snapshots using the +// `feature_states` field when creating a snapshot. +// You can use this API to determine which feature states to include when taking +// a snapshot. +// By default, all feature states are included in a snapshot if that snapshot +// includes the global state, or none if it does not. +// +// A feature state includes one or more system indices necessary for a given +// feature to function. +// In order to ensure data integrity, all system indices that comprise a feature +// state are snapshotted and restored together. 
+// +// The features listed by this API are a combination of built-in features and +// features defined by plugins. +// In order for a feature state to be listed in this API and recognized as a +// valid feature state by the create snapshot API, the plugin that defines that +// feature must be installed on the master node. package getfeatures import ( @@ -69,8 +85,24 @@ func NewGetFeaturesFunc(tp elastictransport.Interface) NewGetFeatures { } } -// Gets a list of features which can be included in snapshots using the -// feature_states field when creating a snapshot +// Get the features. +// Get a list of features that can be included in snapshots using the +// `feature_states` field when creating a snapshot. +// You can use this API to determine which feature states to include when taking +// a snapshot. +// By default, all feature states are included in a snapshot if that snapshot +// includes the global state, or none if it does not. +// +// A feature state includes one or more system indices necessary for a given +// feature to function. +// In order to ensure data integrity, all system indices that comprise a feature +// state are snapshotted and restored together. +// +// The features listed by this API are a combination of built-in features and +// features defined by plugins. +// In order for a feature state to be listed in this API and recognized as a +// valid feature state by the create snapshot API, the plugin that defines that +// feature must be installed on the master node. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-features-api.html func New(tp elastictransport.Interface) *GetFeatures { @@ -276,6 +308,14 @@ func (r *GetFeatures) Header(key, value string) *GetFeatures { return r } +// MasterTimeout Period to wait for a connection to the master node. 
+// API name: master_timeout +func (r *GetFeatures) MasterTimeout(duration string) *GetFeatures { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/features/getfeatures/response.go b/typedapi/features/getfeatures/response.go index 222f42313d..b1d4515aa7 100644 --- a/typedapi/features/getfeatures/response.go +++ b/typedapi/features/getfeatures/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getfeatures @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getfeatures // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/features/get_features/GetFeaturesResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/features/get_features/GetFeaturesResponse.ts#L22-L26 type Response struct { Features []types.Feature `json:"features"` } diff --git a/typedapi/features/resetfeatures/reset_features.go b/typedapi/features/resetfeatures/reset_features.go index 4b4b6ce029..4e969a4404 100644 --- a/typedapi/features/resetfeatures/reset_features.go +++ b/typedapi/features/resetfeatures/reset_features.go @@ -16,9 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Resets the internal state of features, usually by deleting system indices +// Reset the features. +// Clear all of the state information stored in system indices by Elasticsearch +// features, including the security and machine learning indices. +// +// WARNING: Intended for development and testing use only. Do not reset features +// on a production cluster. +// +// Return a cluster to the same state as a new installation by resetting the +// feature state for all Elasticsearch features. +// This deletes all state information stored in system indices. +// +// The response code is HTTP 200 if the state is successfully reset for all +// features. +// It is HTTP 500 if the reset operation failed for any feature. +// +// Note that select features might provide a way to reset particular system +// indices. +// Using this API resets all features, both those that are built-in and +// implemented as plugins. +// +// To list the features that will be affected, use the get features API. +// +// IMPORTANT: The features installed on the node you submit this request to are +// the features that will be reset. Run on the master node if you have any +// doubts about which plugins are installed on individual nodes. package resetfeatures import ( @@ -68,9 +92,33 @@ func NewResetFeaturesFunc(tp elastictransport.Interface) NewResetFeatures { } } -// Resets the internal state of features, usually by deleting system indices +// Reset the features. +// Clear all of the state information stored in system indices by Elasticsearch +// features, including the security and machine learning indices. +// +// WARNING: Intended for development and testing use only. Do not reset features +// on a production cluster. 
+// +// Return a cluster to the same state as a new installation by resetting the +// feature state for all Elasticsearch features. +// This deletes all state information stored in system indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// The response code is HTTP 200 if the state is successfully reset for all +// features. +// It is HTTP 500 if the reset operation failed for any feature. +// +// Note that select features might provide a way to reset particular system +// indices. +// Using this API resets all features, both those that are built-in and +// implemented as plugins. +// +// To list the features that will be affected, use the get features API. +// +// IMPORTANT: The features installed on the node you submit this request to are +// the features that will be reset. Run on the master node if you have any +// doubts about which plugins are installed on individual nodes. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-features-api.html func New(tp elastictransport.Interface) *ResetFeatures { r := &ResetFeatures{ transport: tp, @@ -276,6 +324,14 @@ func (r *ResetFeatures) Header(key, value string) *ResetFeatures { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *ResetFeatures) MasterTimeout(duration string) *ResetFeatures { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/features/resetfeatures/response.go b/typedapi/features/resetfeatures/response.go index 470d90a062..b4273937ce 100644 --- a/typedapi/features/resetfeatures/response.go +++ b/typedapi/features/resetfeatures/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package resetfeatures @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package resetfeatures // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/features/reset_features/ResetFeaturesResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/features/reset_features/ResetFeaturesResponse.ts#L22-L26 type Response struct { Features []types.Feature `json:"features"` } diff --git a/typedapi/fleet/globalcheckpoints/global_checkpoints.go b/typedapi/fleet/globalcheckpoints/global_checkpoints.go index 2851562b2d..f3880e5001 100644 --- a/typedapi/fleet/globalcheckpoints/global_checkpoints.go +++ b/typedapi/fleet/globalcheckpoints/global_checkpoints.go @@ -16,10 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the current global checkpoints for an index. This API is design for -// internal use by the fleet server project. +// Get global checkpoints. +// +// Get the current global checkpoints for an index. +// This API is designed for internal use by the Fleet server project. package globalcheckpoints import ( @@ -77,8 +79,10 @@ func NewGlobalCheckpointsFunc(tp elastictransport.Interface) NewGlobalCheckpoint } } -// Returns the current global checkpoints for an index. This API is design for -// internal use by the fleet server project. +// Get global checkpoints. +// +// Get the current global checkpoints for an index. 
+// This API is designed for internal use by the Fleet server project. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-global-checkpoints.html func New(tp elastictransport.Interface) *GlobalCheckpoints { diff --git a/typedapi/fleet/globalcheckpoints/response.go b/typedapi/fleet/globalcheckpoints/response.go index 48e6c0b0e1..312dfe1ad0 100644 --- a/typedapi/fleet/globalcheckpoints/response.go +++ b/typedapi/fleet/globalcheckpoints/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package globalcheckpoints // Response holds the response body struct for the package globalcheckpoints // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/fleet/global_checkpoints/GlobalCheckpointsResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/fleet/global_checkpoints/GlobalCheckpointsResponse.ts#L22-L27 type Response struct { GlobalCheckpoints []int64 `json:"global_checkpoints"` TimedOut bool `json:"timed_out"` diff --git a/typedapi/fleet/msearch/msearch.go b/typedapi/fleet/msearch/msearch.go index 4c9b3508ca..efa3c48570 100644 --- a/typedapi/fleet/msearch/msearch.go +++ b/typedapi/fleet/msearch/msearch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Executes several [fleet // searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) @@ -94,6 +94,8 @@ func NewMsearchFunc(tp elastictransport.Interface) NewMsearch { // search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) // API. However, similar to the fleet search API, it // supports the wait_for_checkpoints parameter. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-multi-search.html func New(tp elastictransport.Interface) *Msearch { r := &Msearch{ transport: tp, diff --git a/typedapi/fleet/msearch/request.go b/typedapi/fleet/msearch/request.go index 841a086197..4c78774b95 100644 --- a/typedapi/fleet/msearch/request.go +++ b/typedapi/fleet/msearch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package msearch @@ -26,5 +26,5 @@ import ( // Request holds the request body struct for the package msearch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/fleet/msearch/MultiSearchRequest.ts#L32-L115 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/fleet/msearch/MultiSearchRequest.ts#L31-L125 type Request = []types.MsearchRequestItem diff --git a/typedapi/fleet/msearch/response.go b/typedapi/fleet/msearch/response.go index f9da10f850..416881894f 100644 --- a/typedapi/fleet/msearch/response.go +++ b/typedapi/fleet/msearch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package msearch @@ -32,7 +32,7 @@ import ( // Response holds the response body struct for the package msearch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/fleet/msearch/MultiSearchResponse.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/fleet/msearch/MultiSearchResponse.ts#L25-L29 type Response struct { Docs []types.MsearchResponseItem `json:"docs"` } diff --git a/typedapi/fleet/postsecret/post_secret.go b/typedapi/fleet/postsecret/post_secret.go index 7ab977cc6b..87c3bb3595 100644 --- a/typedapi/fleet/postsecret/post_secret.go +++ b/typedapi/fleet/postsecret/post_secret.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Creates a secret stored by Fleet. package postsecret diff --git a/typedapi/fleet/search/request.go b/typedapi/fleet/search/request.go index da5be08569..54c586476f 100644 --- a/typedapi/fleet/search/request.go +++ b/typedapi/fleet/search/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package search @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/fleet/search/SearchRequest.ts#L55-L260 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/fleet/search/SearchRequest.ts#L55-L267 type Request struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` Collapse *types.FieldCollapse `json:"collapse,omitempty"` diff --git a/typedapi/fleet/search/response.go b/typedapi/fleet/search/response.go index b63d2273aa..c3950c9b7c 100644 --- a/typedapi/fleet/search/response.go +++ b/typedapi/fleet/search/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package search @@ -34,7 +34,7 @@ import ( // Response holds the response body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/fleet/search/SearchResponse.ts#L33-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/fleet/search/SearchResponse.ts#L33-L50 type Response struct { Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` @@ -504,6 +504,13 @@ func (s *Response) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := types.NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := types.NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { diff --git a/typedapi/fleet/search/search.go b/typedapi/fleet/search/search.go index 7f616c7f52..879c3dc364 100644 --- a/typedapi/fleet/search/search.go +++ b/typedapi/fleet/search/search.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // The purpose of the fleet search api is to provide a search api where the // search will only be executed @@ -92,6 +92,8 @@ func NewSearchFunc(tp elastictransport.Interface) NewSearch { // search will only be executed // after provided checkpoint has been processed and is visible for searches // inside of Elasticsearch. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html func New(tp elastictransport.Interface) *Search { r := &Search{ transport: tp, @@ -99,8 +101,6 @@ func New(tp elastictransport.Interface) *Search { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -604,213 +604,386 @@ func (r *Search) Pretty(pretty bool) *Search { // API name: aggregations func (r *Search) Aggregations(aggregations map[string]types.Aggregations) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} + +func (r *Search) AddAggregation(key string, value types.AggregationsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + tmp[key] = *value.AggregationsCaster() + + r.req.Aggregations = tmp return r } // API name: collapse -func (r *Search) Collapse(collapse *types.FieldCollapse) *Search { +func (r *Search) Collapse(collapse types.FieldCollapseVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - 
r.req.Collapse = collapse + r.req.Collapse = collapse.FieldCollapseCaster() return r } -// DocvalueFields Array of wildcard (*) patterns. The request returns doc values for field +// Array of wildcard (*) patterns. The request returns doc values for field // names matching these patterns in the hits.fields property of the response. // API name: docvalue_fields -func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormat) *Search { - r.req.DocvalueFields = docvaluefields +func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docvaluefields { + r.req.DocvalueFields = append(r.req.DocvalueFields, *v.FieldAndFormatCaster()) + + } return r } -// Explain If true, returns detailed information about score computation as part of a +// If true, returns detailed information about score computation as part of a // hit. // API name: explain func (r *Search) Explain(explain bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Explain = &explain return r } -// Ext Configuration of search extensions defined by Elasticsearch plugins. +// Configuration of search extensions defined by Elasticsearch plugins. // API name: ext func (r *Search) Ext(ext map[string]json.RawMessage) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Ext = ext + return r +} + +func (r *Search) AddExt(key string, value json.RawMessage) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Ext == nil { + r.req.Ext = make(map[string]json.RawMessage) + } else { + tmp = r.req.Ext + } + tmp[key] = value + + r.req.Ext = tmp return r } -// Fields Array of wildcard (*) patterns. 
The request returns values for field names +// Array of wildcard (*) patterns. The request returns values for field names // matching these patterns in the hits.fields property of the response. // API name: fields -func (r *Search) Fields(fields ...types.FieldAndFormat) *Search { - r.req.Fields = fields +func (r *Search) Fields(fields ...types.FieldAndFormatVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range fields { + + r.req.Fields = append(r.req.Fields, *v.FieldAndFormatCaster()) + } return r } -// From Starting document offset. By default, you cannot page through more than +// Starting document offset. By default, you cannot page through more than // 10,000 // hits using the from and size parameters. To page through more hits, use the // search_after parameter. // API name: from func (r *Search) From(from int) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } // API name: highlight -func (r *Search) Highlight(highlight *types.Highlight) *Search { +func (r *Search) Highlight(highlight types.HighlightVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Highlight = highlight + r.req.Highlight = highlight.HighlightCaster() return r } -// IndicesBoost Boosts the _score of documents from specified indices. +// Boosts the _score of documents from specified indices. // API name: indices_boost -func (r *Search) IndicesBoost(indicesboosts ...map[string]types.Float64) *Search { - r.req.IndicesBoost = indicesboosts +func (r *Search) IndicesBoost(indicesboost []map[string]types.Float64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndicesBoost = indicesboost return r } -// MinScore Minimum _score for matching documents. 
Documents with a lower _score are +// Minimum _score for matching documents. Documents with a lower _score are // not included in the search results. // API name: min_score func (r *Search) MinScore(minscore types.Float64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MinScore = &minscore return r } -// Pit Limits the search to a point in time (PIT). If you provide a PIT, you +// Limits the search to a point in time (PIT). If you provide a PIT, you // cannot specify an in the request path. // API name: pit -func (r *Search) Pit(pit *types.PointInTimeReference) *Search { +func (r *Search) Pit(pit types.PointInTimeReferenceVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pit = pit + r.req.Pit = pit.PointInTimeReferenceCaster() return r } // API name: post_filter -func (r *Search) PostFilter(postfilter *types.Query) *Search { +func (r *Search) PostFilter(postfilter types.QueryVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PostFilter = postfilter + r.req.PostFilter = postfilter.QueryCaster() return r } // API name: profile func (r *Search) Profile(profile bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Profile = &profile return r } -// Query Defines the search definition using the Query DSL. +// Defines the search definition using the Query DSL. 
// API name: query -func (r *Search) Query(query *types.Query) *Search { +func (r *Search) Query(query types.QueryVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } // API name: rescore -func (r *Search) Rescore(rescores ...types.Rescore) *Search { - r.req.Rescore = rescores +func (r *Search) Rescore(rescores ...types.RescoreVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Rescore = make([]types.Rescore, len(rescores)) + for i, v := range rescores { + r.req.Rescore[i] = *v.RescoreCaster() + } return r } -// RuntimeMappings Defines one or more runtime fields in the search request. These fields take +// Defines one or more runtime fields in the search request. These fields take // precedence over mapped fields with the same name. // API name: runtime_mappings -func (r *Search) RuntimeMappings(runtimefields types.RuntimeFields) *Search { - r.req.RuntimeMappings = runtimefields +func (r *Search) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// ScriptFields Retrieve a script evaluation (based on different fields) for each hit. +// Retrieve a script evaluation (based on different fields) for each hit. 
// API name: script_fields func (r *Search) ScriptFields(scriptfields map[string]types.ScriptField) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ScriptFields = scriptfields + return r +} +func (r *Search) AddScriptField(key string, value types.ScriptFieldVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ScriptField + if r.req.ScriptFields == nil { + r.req.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = r.req.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + + r.req.ScriptFields = tmp return r } // API name: search_after -func (r *Search) SearchAfter(sortresults ...types.FieldValue) *Search { - r.req.SearchAfter = sortresults +func (r *Search) SearchAfter(sortresults ...types.FieldValueVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// SeqNoPrimaryTerm If true, returns sequence number and primary term of the last modification +// If true, returns sequence number and primary term of the last modification // of each hit. See Optimistic concurrency control. // API name: seq_no_primary_term func (r *Search) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.SeqNoPrimaryTerm = &seqnoprimaryterm return r } -// Size The number of hits to return. By default, you cannot page through more +// The number of hits to return. By default, you cannot page through more // than 10,000 hits using the from and size parameters. To page through more // hits, use the search_after parameter. 
// API name: size func (r *Search) Size(size int) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } // API name: slice -func (r *Search) Slice(slice *types.SlicedScroll) *Search { +func (r *Search) Slice(slice types.SlicedScrollVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Slice = slice + r.req.Slice = slice.SlicedScrollCaster() return r } // API name: sort -func (r *Search) Sort(sorts ...types.SortCombinations) *Search { - r.req.Sort = sorts +func (r *Search) Sort(sorts ...types.SortCombinationsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } -// Source_ Indicates which source fields are returned for matching documents. These +// Indicates which source fields are returned for matching documents. These // fields are returned in the hits._source property of the search response. // API name: _source -func (r *Search) Source_(sourceconfig types.SourceConfig) *Search { - r.req.Source_ = sourceconfig +func (r *Search) Source_(sourceconfig types.SourceConfigVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source_ = *sourceconfig.SourceConfigCaster() return r } -// Stats Stats groups to associate with the search. Each group maintains a statistics +// Stats groups to associate with the search. Each group maintains a statistics // aggregation for its associated searches. You can retrieve these stats using // the indices stats API. 
// API name: stats func (r *Search) Stats(stats ...string) *Search { - r.req.Stats = stats + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range stats { + + r.req.Stats = append(r.req.Stats, v) + } return r } -// StoredFields List of stored fields to return as part of a hit. If no fields are specified, +// List of stored fields to return as part of a hit. If no fields are specified, // no stored fields are included in the response. If this field is specified, // the _source // parameter defaults to false. You can pass _source: true to return both source @@ -818,20 +991,29 @@ func (r *Search) Stats(stats ...string) *Search { // and stored fields in the search response. // API name: stored_fields func (r *Search) StoredFields(fields ...string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.StoredFields = fields return r } // API name: suggest -func (r *Search) Suggest(suggest *types.Suggester) *Search { +func (r *Search) Suggest(suggest types.SuggesterVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Suggest = suggest + r.req.Suggest = suggest.SuggesterCaster() return r } -// TerminateAfter Maximum number of documents to collect for each shard. If a query reaches +// Maximum number of documents to collect for each shard. If a query reaches // this // limit, Elasticsearch terminates the query early. Elasticsearch collects // documents @@ -839,48 +1021,71 @@ func (r *Search) Suggest(suggest *types.Suggester) *Search { // early. 
// API name: terminate_after func (r *Search) TerminateAfter(terminateafter int64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TerminateAfter = &terminateafter return r } -// Timeout Specifies the period of time to wait for a response from each shard. If no +// Specifies the period of time to wait for a response from each shard. If no // response // is received before the timeout expires, the request fails and returns an // error. // Defaults to no timeout. // API name: timeout func (r *Search) Timeout(timeout string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Timeout = &timeout return r } -// TrackScores If true, calculate and return document scores, even if the scores are not +// If true, calculate and return document scores, even if the scores are not // used for sorting. // API name: track_scores func (r *Search) TrackScores(trackscores bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TrackScores = &trackscores return r } -// TrackTotalHits Number of hits matching the query to count accurately. If true, the exact +// Number of hits matching the query to count accurately. If true, the exact // number of hits is returned at the cost of some performance. If false, the // response does not include the total number of hits matching the query. // Defaults to 10,000 hits. // API name: track_total_hits -func (r *Search) TrackTotalHits(trackhits types.TrackHits) *Search { - r.req.TrackTotalHits = trackhits +func (r *Search) TrackTotalHits(trackhits types.TrackHitsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackTotalHits = *trackhits.TrackHitsCaster() return r } -// Version If true, returns document version as part of a hit. 
+// If true, returns document version as part of a hit. // API name: version func (r *Search) Version(version bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &version return r diff --git a/typedapi/graph/explore/explore.go b/typedapi/graph/explore/explore.go index 968bd9bdbe..ba9fd73bcf 100644 --- a/typedapi/graph/explore/explore.go +++ b/typedapi/graph/explore/explore.go @@ -16,10 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Extracts and summarizes information about the documents and terms in an +// Explore graph analytics. +// Extract and summarize information about the documents and terms in an // Elasticsearch data stream or index. +// The easiest way to understand the behavior of this API is to use the Graph UI +// to explore connections. +// An initial request to the `_explore` API contains a seed query that +// identifies the documents of interest and specifies the fields that define the +// vertices and connections you want to include in the graph. +// Subsequent requests enable you to spider out from one more vertices of +// interest. +// You can exclude vertices that have already been returned. package explore import ( @@ -82,8 +91,17 @@ func NewExploreFunc(tp elastictransport.Interface) NewExplore { } } -// Extracts and summarizes information about the documents and terms in an +// Explore graph analytics. +// Extract and summarize information about the documents and terms in an // Elasticsearch data stream or index. +// The easiest way to understand the behavior of this API is to use the Graph UI +// to explore connections. 
+// An initial request to the `_explore` API contains a seed query that +// identifies the documents of interest and specifies the fields that define the +// vertices and connections you want to include in the graph. +// Subsequent requests enable you to spider out from one more vertices of +// interest. +// You can exclude vertices that have already been returned. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html func New(tp elastictransport.Interface) *Explore { @@ -93,8 +111,6 @@ func New(tp elastictransport.Interface) *Explore { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -380,40 +396,59 @@ func (r *Explore) Pretty(pretty bool) *Explore { return r } -// Connections Specifies or more fields from which you want to extract terms that are +// Specifies or more fields from which you want to extract terms that are // associated with the specified vertices. // API name: connections -func (r *Explore) Connections(connections *types.Hop) *Explore { +func (r *Explore) Connections(connections types.HopVariant) *Explore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Connections = connections + r.req.Connections = connections.HopCaster() return r } -// Controls Direct the Graph API how to build the graph. +// Direct the Graph API how to build the graph. // API name: controls -func (r *Explore) Controls(controls *types.ExploreControls) *Explore { +func (r *Explore) Controls(controls types.ExploreControlsVariant) *Explore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Controls = controls + r.req.Controls = controls.ExploreControlsCaster() return r } -// Query A seed query that identifies the documents of interest. 
Can be any valid +// A seed query that identifies the documents of interest. Can be any valid // Elasticsearch query. // API name: query -func (r *Explore) Query(query *types.Query) *Explore { +func (r *Explore) Query(query types.QueryVariant) *Explore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// Vertices Specifies one or more fields that contain the terms you want to include in +// Specifies one or more fields that contain the terms you want to include in // the graph as vertices. // API name: vertices -func (r *Explore) Vertices(vertices ...types.VertexDefinition) *Explore { - r.req.Vertices = vertices +func (r *Explore) Vertices(vertices ...types.VertexDefinitionVariant) *Explore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range vertices { + + r.req.Vertices = append(r.req.Vertices, *v.VertexDefinitionCaster()) + } return r } diff --git a/typedapi/graph/explore/request.go b/typedapi/graph/explore/request.go index f97875687b..a285d9eefb 100644 --- a/typedapi/graph/explore/request.go +++ b/typedapi/graph/explore/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package explore @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package explore // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/graph/explore/GraphExploreRequest.ts#L28-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/graph/explore/GraphExploreRequest.ts#L28-L84 type Request struct { // Connections Specifies or more fields from which you want to extract terms that are diff --git a/typedapi/graph/explore/response.go b/typedapi/graph/explore/response.go index 97170a73ee..40983c1a01 100644 --- a/typedapi/graph/explore/response.go +++ b/typedapi/graph/explore/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package explore @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package explore // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/graph/explore/GraphExploreResponse.ts#L25-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/graph/explore/GraphExploreResponse.ts#L25-L33 type Response struct { Connections []types.Connection `json:"connections"` Failures []types.ShardFailure `json:"failures"` diff --git a/typedapi/ilm/deletelifecycle/delete_lifecycle.go b/typedapi/ilm/deletelifecycle/delete_lifecycle.go index baee0e06f7..3440be4aa6 100644 --- a/typedapi/ilm/deletelifecycle/delete_lifecycle.go +++ b/typedapi/ilm/deletelifecycle/delete_lifecycle.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes the specified lifecycle policy definition. You cannot delete policies -// that are currently in use. If the policy is being used to manage any indices, -// the request fails and returns an error. +// Delete a lifecycle policy. +// You cannot delete policies that are currently in use. If the policy is being +// used to manage any indices, the request fails and returns an error. package deletelifecycle import ( @@ -78,9 +78,9 @@ func NewDeleteLifecycleFunc(tp elastictransport.Interface) NewDeleteLifecycle { } } -// Deletes the specified lifecycle policy definition. You cannot delete policies -// that are currently in use. 
If the policy is being used to manage any indices, -// the request fails and returns an error. +// Delete a lifecycle policy. +// You cannot delete policies that are currently in use. If the policy is being +// used to manage any indices, the request fails and returns an error. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html func New(tp elastictransport.Interface) *DeleteLifecycle { diff --git a/typedapi/ilm/deletelifecycle/response.go b/typedapi/ilm/deletelifecycle/response.go index 28fddc7437..952d92cbf9 100644 --- a/typedapi/ilm/deletelifecycle/response.go +++ b/typedapi/ilm/deletelifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletelifecycle // Response holds the response body struct for the package deletelifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/delete_lifecycle/DeleteLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/delete_lifecycle/DeleteLifecycleResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ilm/explainlifecycle/explain_lifecycle.go b/typedapi/ilm/explainlifecycle/explain_lifecycle.go index 72566e8f14..bc06263bbf 100644 --- a/typedapi/ilm/explainlifecycle/explain_lifecycle.go +++ b/typedapi/ilm/explainlifecycle/explain_lifecycle.go @@ -16,12 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves information about the index’s current lifecycle state, such as the -// currently executing phase, action, and step. Shows when the index entered -// each one, the definition of the running phase, and information about any -// failures. +// Explain the lifecycle state. +// Get the current lifecycle status for one or more indices. +// For data streams, the API retrieves the current lifecycle status for the +// stream's backing indices. +// +// The response indicates when the index entered each lifecycle state, provides +// the definition of the running phase, and information about any failures. package explainlifecycle import ( @@ -79,10 +82,13 @@ func NewExplainLifecycleFunc(tp elastictransport.Interface) NewExplainLifecycle } } -// Retrieves information about the index’s current lifecycle state, such as the -// currently executing phase, action, and step. Shows when the index entered -// each one, the definition of the running phase, and information about any -// failures. +// Explain the lifecycle state. +// Get the current lifecycle status for one or more indices. +// For data streams, the API retrieves the current lifecycle status for the +// stream's backing indices. +// +// The response indicates when the index entered each lifecycle state, provides +// the definition of the running phase, and information about any failures. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-explain-lifecycle.html func New(tp elastictransport.Interface) *ExplainLifecycle { @@ -334,15 +340,6 @@ func (r *ExplainLifecycle) MasterTimeout(duration string) *ExplainLifecycle { return r } -// Timeout Period to wait for a response. If no response is received before the timeout -// expires, the request fails and returns an error. 
-// API name: timeout -func (r *ExplainLifecycle) Timeout(duration string) *ExplainLifecycle { - r.values.Set("timeout", duration) - - return r -} - // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ilm/explainlifecycle/response.go b/typedapi/ilm/explainlifecycle/response.go index d1c2d7074b..d98499435e 100644 --- a/typedapi/ilm/explainlifecycle/response.go +++ b/typedapi/ilm/explainlifecycle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package explainlifecycle @@ -24,6 +24,7 @@ import ( "bytes" "encoding/json" "errors" + "fmt" "io" "github.com/elastic/go-elasticsearch/v8/typedapi/types" @@ -31,7 +32,7 @@ import ( // Response holds the response body struct for the package explainlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/explain_lifecycle/ExplainLifecycleResponse.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/explain_lifecycle/ExplainLifecycleResponse.ts#L24-L28 type Response struct { Indices map[string]types.LifecycleExplain `json:"indices"` } @@ -75,19 +76,19 @@ func (s *Response) UnmarshalJSON(data []byte) error { case true: oo := types.NewLifecycleExplainManaged() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Indices | %w", err) } s.Indices[key] = oo case false: oo := types.NewLifecycleExplainUnmanaged() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Indices | %w", err) } s.Indices[key] = oo default: oo := new(types.LifecycleExplain) 
if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(types.LifecycleExplain) | %w", err) } s.Indices[key] = oo } diff --git a/typedapi/ilm/getlifecycle/get_lifecycle.go b/typedapi/ilm/getlifecycle/get_lifecycle.go index aebef1ce21..369da2d923 100644 --- a/typedapi/ilm/getlifecycle/get_lifecycle.go +++ b/typedapi/ilm/getlifecycle/get_lifecycle.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves a lifecycle policy. +// Get lifecycle policies. package getlifecycle import ( @@ -74,7 +74,7 @@ func NewGetLifecycleFunc(tp elastictransport.Interface) NewGetLifecycle { } } -// Retrieves a lifecycle policy. +// Get lifecycle policies. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycle.html func New(tp elastictransport.Interface) *GetLifecycle { diff --git a/typedapi/ilm/getlifecycle/response.go b/typedapi/ilm/getlifecycle/response.go index 40b5439996..daa30fa902 100644 --- a/typedapi/ilm/getlifecycle/response.go +++ b/typedapi/ilm/getlifecycle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getlifecycle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/get_lifecycle/GetLifecycleResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/get_lifecycle/GetLifecycleResponse.ts#L23-L26 type Response map[string]types.Lifecycle diff --git a/typedapi/ilm/getstatus/get_status.go b/typedapi/ilm/getstatus/get_status.go index f2a6f8ecb2..c93af7dab9 100644 --- a/typedapi/ilm/getstatus/get_status.go +++ b/typedapi/ilm/getstatus/get_status.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves the current index lifecycle management (ILM) status. +// Get the ILM status. +// +// Get the current index lifecycle management status. package getstatus import ( @@ -68,7 +70,9 @@ func NewGetStatusFunc(tp elastictransport.Interface) NewGetStatus { } } -// Retrieves the current index lifecycle management (ILM) status. +// Get the ILM status. +// +// Get the current index lifecycle management status. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-status.html func New(tp elastictransport.Interface) *GetStatus { diff --git a/typedapi/ilm/getstatus/response.go b/typedapi/ilm/getstatus/response.go index 6eb05f8e9c..492ce2e8c2 100644 --- a/typedapi/ilm/getstatus/response.go +++ b/typedapi/ilm/getstatus/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getstatus @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getstatus // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/get_status/GetIlmStatusResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/get_status/GetIlmStatusResponse.ts#L22-L24 type Response struct { OperationMode lifecycleoperationmode.LifecycleOperationMode `json:"operation_mode"` } diff --git a/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go b/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go index f913549dfc..cfb6205466 100644 --- a/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go +++ b/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go @@ -16,14 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Switches the indices, ILM policies, and legacy, composable and component -// templates from using custom node attributes and -// attribute-based allocation filters to using data tiers, and optionally -// deletes one legacy index template.+ +// Migrate to data tiers routing. +// Switch the indices, ILM policies, and legacy, composable, and component +// templates from using custom node attributes and attribute-based allocation +// filters to using data tiers. +// Optionally, delete one legacy index template. // Using node roles enables ILM to automatically move the indices between data // tiers. +// +// Migrating away from custom node attributes routing can be manually performed. +// This API provides an automated way of performing three out of the four manual +// steps listed in the migration guide: +// +// 1. Stop setting the custom hot attribute on new indices. +// 1. Remove custom allocation settings from existing ILM policies. +// 1. Replace custom allocation settings from existing indices with the +// corresponding tier preference. +// +// ILM must be stopped before performing the migration. +// Use the stop ILM and get ILM status APIs to wait until the reported operation +// mode is `STOPPED`. package migratetodatatiers import ( @@ -78,13 +92,27 @@ func NewMigrateToDataTiersFunc(tp elastictransport.Interface) NewMigrateToDataTi } } -// Switches the indices, ILM policies, and legacy, composable and component -// templates from using custom node attributes and -// attribute-based allocation filters to using data tiers, and optionally -// deletes one legacy index template.+ +// Migrate to data tiers routing. 
+// Switch the indices, ILM policies, and legacy, composable, and component +// templates from using custom node attributes and attribute-based allocation +// filters to using data tiers. +// Optionally, delete one legacy index template. // Using node roles enables ILM to automatically move the indices between data // tiers. // +// Migrating away from custom node attributes routing can be manually performed. +// This API provides an automated way of performing three out of the four manual +// steps listed in the migration guide: +// +// 1. Stop setting the custom hot attribute on new indices. +// 1. Remove custom allocation settings from existing ILM policies. +// 1. Replace custom allocation settings from existing indices with the +// corresponding tier preference. +// +// ILM must be stopped before performing the migration. +// Use the stop ILM and get ILM status APIs to wait until the reported operation +// mode is `STOPPED`. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate-to-data-tiers.html func New(tp elastictransport.Interface) *MigrateToDataTiers { r := &MigrateToDataTiers{ @@ -93,8 +121,6 @@ func New(tp elastictransport.Interface) *MigrateToDataTiers { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -359,6 +385,10 @@ func (r *MigrateToDataTiers) Pretty(pretty bool) *MigrateToDataTiers { // API name: legacy_template_to_delete func (r *MigrateToDataTiers) LegacyTemplateToDelete(legacytemplatetodelete string) *MigrateToDataTiers { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.LegacyTemplateToDelete = &legacytemplatetodelete @@ -367,6 +397,10 @@ func (r *MigrateToDataTiers) LegacyTemplateToDelete(legacytemplatetodelete strin // API name: node_attribute func (r *MigrateToDataTiers) NodeAttribute(nodeattribute string) *MigrateToDataTiers { + // 
Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.NodeAttribute = &nodeattribute diff --git a/typedapi/ilm/migratetodatatiers/request.go b/typedapi/ilm/migratetodatatiers/request.go index 017ca42339..2a2c9a2779 100644 --- a/typedapi/ilm/migratetodatatiers/request.go +++ b/typedapi/ilm/migratetodatatiers/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package migratetodatatiers @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package migratetodatatiers // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/migrate_to_data_tiers/Request.ts#L22-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/migrate_to_data_tiers/Request.ts#L22-L61 type Request struct { LegacyTemplateToDelete *string `json:"legacy_template_to_delete,omitempty"` NodeAttribute *string `json:"node_attribute,omitempty"` diff --git a/typedapi/ilm/migratetodatatiers/response.go b/typedapi/ilm/migratetodatatiers/response.go index bcbebb83f4..9dd6bc52aa 100644 --- a/typedapi/ilm/migratetodatatiers/response.go +++ b/typedapi/ilm/migratetodatatiers/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package migratetodatatiers @@ -31,15 +31,25 @@ import ( // Response holds the response body struct for the package migratetodatatiers // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/migrate_to_data_tiers/Response.ts#L22-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/migrate_to_data_tiers/Response.ts#L22-L51 type Response struct { - DryRun bool `json:"dry_run"` - MigratedComponentTemplates []string `json:"migrated_component_templates"` + DryRun bool `json:"dry_run"` + // MigratedComponentTemplates The component templates that were updated to not contain custom routing + // settings for the provided data attribute. + MigratedComponentTemplates []string `json:"migrated_component_templates"` + // MigratedComposableTemplates The composable index templates that were updated to not contain custom + // routing settings for the provided data attribute. MigratedComposableTemplates []string `json:"migrated_composable_templates"` - MigratedIlmPolicies []string `json:"migrated_ilm_policies"` - MigratedIndices []string `json:"migrated_indices"` - MigratedLegacyTemplates []string `json:"migrated_legacy_templates"` - RemovedLegacyTemplate string `json:"removed_legacy_template"` + // MigratedIlmPolicies The ILM policies that were updated. + MigratedIlmPolicies []string `json:"migrated_ilm_policies"` + // MigratedIndices The indices that were migrated to tier preference routing. + MigratedIndices []string `json:"migrated_indices"` + // MigratedLegacyTemplates The legacy index templates that were updated to not contain custom routing + // settings for the provided data attribute. 
+ MigratedLegacyTemplates []string `json:"migrated_legacy_templates"` + // RemovedLegacyTemplate The name of the legacy index template that was deleted. + // This information is missing if no legacy index templates were deleted. + RemovedLegacyTemplate string `json:"removed_legacy_template"` } // NewResponse returns a Response diff --git a/typedapi/ilm/movetostep/move_to_step.go b/typedapi/ilm/movetostep/move_to_step.go index 0550f62072..1a4f02fca5 100644 --- a/typedapi/ilm/movetostep/move_to_step.go +++ b/typedapi/ilm/movetostep/move_to_step.go @@ -16,9 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Manually moves an index into the specified step and executes that step. +// Move to a lifecycle step. +// Manually move an index into a specific step in the lifecycle policy and run +// that step. +// +// WARNING: This operation can result in the loss of data. Manually moving an +// index into a specific step runs that step even if it has already been +// performed. This is a potentially destructive action and this should be +// considered an expert level API. +// +// You must specify both the current step and the step to be executed in the +// body of the request. +// The request will fail if the current step does not match the step currently +// running for the index +// This is to prevent the index from being moved from an unexpected step into +// the next step. +// +// When specifying the target (`next_step`) to which the index will be moved, +// either the name or both the action and name fields are optional. +// If only the phase is specified, the index will move to the first step of the +// first action in the target phase. 
+// If the phase and action are specified, the index will move to the first step +// of the specified action in the specified phase. +// Only actions specified in the ILM policy are considered valid. +// An index cannot move to a step that is not part of its policy. package movetostep import ( @@ -81,7 +104,30 @@ func NewMoveToStepFunc(tp elastictransport.Interface) NewMoveToStep { } } -// Manually moves an index into the specified step and executes that step. +// Move to a lifecycle step. +// Manually move an index into a specific step in the lifecycle policy and run +// that step. +// +// WARNING: This operation can result in the loss of data. Manually moving an +// index into a specific step runs that step even if it has already been +// performed. This is a potentially destructive action and this should be +// considered an expert level API. +// +// You must specify both the current step and the step to be executed in the +// body of the request. +// The request will fail if the current step does not match the step currently +// running for the index +// This is to prevent the index from being moved from an unexpected step into +// the next step. +// +// When specifying the target (`next_step`) to which the index will be moved, +// either the name or both the action and name fields are optional. +// If only the phase is specified, the index will move to the first step of the +// first action in the target phase. +// If the phase and action are specified, the index will move to the first step +// of the specified action in the specified phase. +// Only actions specified in the ILM policy are considered valid. +// An index cannot move to a step that is not part of its policy. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-move-to-step.html func New(tp elastictransport.Interface) *MoveToStep { @@ -91,8 +137,6 @@ func New(tp elastictransport.Interface) *MoveToStep { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -359,18 +403,28 @@ func (r *MoveToStep) Pretty(pretty bool) *MoveToStep { return r } +// The step that the index is expected to be in. // API name: current_step -func (r *MoveToStep) CurrentStep(currentstep *types.StepKey) *MoveToStep { +func (r *MoveToStep) CurrentStep(currentstep types.StepKeyVariant) *MoveToStep { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.CurrentStep = currentstep + r.req.CurrentStep = *currentstep.StepKeyCaster() return r } +// The step that you want to run. // API name: next_step -func (r *MoveToStep) NextStep(nextstep *types.StepKey) *MoveToStep { +func (r *MoveToStep) NextStep(nextstep types.StepKeyVariant) *MoveToStep { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.NextStep = nextstep + r.req.NextStep = *nextstep.StepKeyCaster() return r } diff --git a/typedapi/ilm/movetostep/request.go b/typedapi/ilm/movetostep/request.go index 4dfa9577f6..0143627cba 100644 --- a/typedapi/ilm/movetostep/request.go +++ b/typedapi/ilm/movetostep/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package movetostep @@ -29,10 +29,13 @@ import ( // Request holds the request body struct for the package movetostep // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/move_to_step/MoveToStepRequest.ts#L24-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/move_to_step/MoveToStepRequest.ts#L24-L64 type Request struct { - CurrentStep *types.StepKey `json:"current_step,omitempty"` - NextStep *types.StepKey `json:"next_step,omitempty"` + + // CurrentStep The step that the index is expected to be in. + CurrentStep types.StepKey `json:"current_step"` + // NextStep The step that you want to run. + NextStep types.StepKey `json:"next_step"` } // NewRequest returns a Request diff --git a/typedapi/ilm/movetostep/response.go b/typedapi/ilm/movetostep/response.go index 9359dbc8fb..61e2c3651e 100644 --- a/typedapi/ilm/movetostep/response.go +++ b/typedapi/ilm/movetostep/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package movetostep // Response holds the response body struct for the package movetostep // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/move_to_step/MoveToStepResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/move_to_step/MoveToStepResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ilm/putlifecycle/put_lifecycle.go b/typedapi/ilm/putlifecycle/put_lifecycle.go index 1a0693a923..c51c9a9891 100644 --- a/typedapi/ilm/putlifecycle/put_lifecycle.go +++ b/typedapi/ilm/putlifecycle/put_lifecycle.go @@ -16,10 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a lifecycle policy. If the specified policy exists, the policy is -// replaced and the policy version is incremented. +// Create or update a lifecycle policy. +// If the specified policy exists, it is replaced and the policy version is +// incremented. +// +// NOTE: Only the latest version of the policy is stored, you cannot revert to +// previous versions. package putlifecycle import ( @@ -82,8 +86,12 @@ func NewPutLifecycleFunc(tp elastictransport.Interface) NewPutLifecycle { } } -// Creates a lifecycle policy. If the specified policy exists, the policy is -// replaced and the policy version is incremented. +// Create or update a lifecycle policy. 
+// If the specified policy exists, it is replaced and the policy version is +// incremented. +// +// NOTE: Only the latest version of the policy is stored, you cannot revert to +// previous versions. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-put-lifecycle.html func New(tp elastictransport.Interface) *PutLifecycle { @@ -93,8 +101,6 @@ func New(tp elastictransport.Interface) *PutLifecycle { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { diff --git a/typedapi/ilm/putlifecycle/request.go b/typedapi/ilm/putlifecycle/request.go index f28f91a99d..93de030618 100644 --- a/typedapi/ilm/putlifecycle/request.go +++ b/typedapi/ilm/putlifecycle/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putlifecycle @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/put_lifecycle/PutLifecycleRequest.ts#L25-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/put_lifecycle/PutLifecycleRequest.ts#L25-L66 type Request struct { Policy *types.IlmPolicy `json:"policy,omitempty"` } diff --git a/typedapi/ilm/putlifecycle/response.go b/typedapi/ilm/putlifecycle/response.go index 72049852c8..9d9e3f718a 100644 --- a/typedapi/ilm/putlifecycle/response.go +++ b/typedapi/ilm/putlifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putlifecycle // Response holds the response body struct for the package putlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/put_lifecycle/PutLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/put_lifecycle/PutLifecycleResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ilm/removepolicy/remove_policy.go b/typedapi/ilm/removepolicy/remove_policy.go index 5c04787210..014d86a4a9 100644 --- a/typedapi/ilm/removepolicy/remove_policy.go +++ b/typedapi/ilm/removepolicy/remove_policy.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Removes the assigned lifecycle policy and stops managing the specified index +// Remove policies from an index. +// Remove the assigned lifecycle policies from an index or a data stream's +// backing indices. +// It also stops managing the indices. package removepolicy import ( @@ -76,7 +79,10 @@ func NewRemovePolicyFunc(tp elastictransport.Interface) NewRemovePolicy { } } -// Removes the assigned lifecycle policy and stops managing the specified index +// Remove policies from an index. +// Remove the assigned lifecycle policies from an index or a data stream's +// backing indices. +// It also stops managing the indices. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html func New(tp elastictransport.Interface) *RemovePolicy { diff --git a/typedapi/ilm/removepolicy/response.go b/typedapi/ilm/removepolicy/response.go index f6bcb39224..fee60aaa6d 100644 --- a/typedapi/ilm/removepolicy/response.go +++ b/typedapi/ilm/removepolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package removepolicy // Response holds the response body struct for the package removepolicy // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/remove_policy/RemovePolicyResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/remove_policy/RemovePolicyResponse.ts#L22-L27 type Response struct { FailedIndexes []string `json:"failed_indexes"` HasFailures bool `json:"has_failures"` diff --git a/typedapi/ilm/retry/response.go b/typedapi/ilm/retry/response.go index 0e9a7061b6..ca9345688c 100644 --- a/typedapi/ilm/retry/response.go +++ b/typedapi/ilm/retry/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package retry // Response holds the response body struct for the package retry // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/retry/RetryIlmResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/retry/RetryIlmResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ilm/retry/retry.go b/typedapi/ilm/retry/retry.go index 6e9d0a6b2e..49f2937158 100644 --- a/typedapi/ilm/retry/retry.go +++ b/typedapi/ilm/retry/retry.go @@ -16,9 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Retries executing the policy for an index that is in the ERROR step. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Retry a policy. +// Retry running the lifecycle policy for an index that is in the ERROR step. +// The API sets the policy back to the step where the error occurred and runs +// the step. +// Use the explain lifecycle state API to determine whether an index is in the +// ERROR step. package retry import ( @@ -76,7 +81,12 @@ func NewRetryFunc(tp elastictransport.Interface) NewRetry { } } -// Retries executing the policy for an index that is in the ERROR step. +// Retry a policy. +// Retry running the lifecycle policy for an index that is in the ERROR step. +// The API sets the policy back to the step where the error occurred and runs +// the step. 
+// Use the explain lifecycle state API to determine whether an index is in the +// ERROR step. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-retry-policy.html func New(tp elastictransport.Interface) *Retry { diff --git a/typedapi/ilm/start/response.go b/typedapi/ilm/start/response.go index 91ff2af963..111afb585c 100644 --- a/typedapi/ilm/start/response.go +++ b/typedapi/ilm/start/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package start // Response holds the response body struct for the package start // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/start/StartIlmResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/start/StartIlmResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ilm/start/start.go b/typedapi/ilm/start/start.go index 2d6ba513a1..dbcaff67d4 100644 --- a/typedapi/ilm/start/start.go +++ b/typedapi/ilm/start/start.go @@ -16,9 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Start the index lifecycle management (ILM) plugin. +// Start the ILM plugin. +// Start the index lifecycle management plugin if it is currently stopped. +// ILM is started automatically when the cluster is formed. 
+// Restarting ILM is necessary only when it has been stopped using the stop ILM +// API. package start import ( @@ -68,7 +72,11 @@ func NewStartFunc(tp elastictransport.Interface) NewStart { } } -// Start the index lifecycle management (ILM) plugin. +// Start the ILM plugin. +// Start the index lifecycle management plugin if it is currently stopped. +// ILM is started automatically when the cluster is formed. +// Restarting ILM is necessary only when it has been stopped using the stop ILM +// API. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-start.html func New(tp elastictransport.Interface) *Start { @@ -276,6 +284,8 @@ func (r *Start) Header(key, value string) *Start { return r } +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. // API name: master_timeout func (r *Start) MasterTimeout(duration string) *Start { r.values.Set("master_timeout", duration) @@ -283,6 +293,8 @@ func (r *Start) MasterTimeout(duration string) *Start { return r } +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. // API name: timeout func (r *Start) Timeout(duration string) *Start { r.values.Set("timeout", duration) diff --git a/typedapi/ilm/stop/response.go b/typedapi/ilm/stop/response.go index 2c7c59b2a1..2804bc7c7d 100644 --- a/typedapi/ilm/stop/response.go +++ b/typedapi/ilm/stop/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stop // Response holds the response body struct for the package stop // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/stop/StopIlmResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/stop/StopIlmResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ilm/stop/stop.go b/typedapi/ilm/stop/stop.go index 69e04411c9..e228c2ed25 100644 --- a/typedapi/ilm/stop/stop.go +++ b/typedapi/ilm/stop/stop.go @@ -16,10 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Halts all lifecycle management operations and stops the index lifecycle -// management (ILM) plugin +// Stop the ILM plugin. +// Halt all lifecycle management operations and stop the index lifecycle +// management plugin. +// This is useful when you are performing maintenance on the cluster and need to +// prevent ILM from performing any actions on your indices. +// +// The API returns as soon as the stop request has been acknowledged, but the +// plugin might continue to run until in-progress operations complete and the +// plugin can be safely stopped. +// Use the get ILM status API to check whether ILM is running. 
package stop import ( @@ -69,8 +77,16 @@ func NewStopFunc(tp elastictransport.Interface) NewStop { } } -// Halts all lifecycle management operations and stops the index lifecycle -// management (ILM) plugin +// Stop the ILM plugin. +// Halt all lifecycle management operations and stop the index lifecycle +// management plugin. +// This is useful when you are performing maintenance on the cluster and need to +// prevent ILM from performing any actions on your indices. +// +// The API returns as soon as the stop request has been acknowledged, but the +// plugin might continue to run until in-progress operations complete and the +// plugin can be safely stopped. +// Use the get ILM status API to check whether ILM is running. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-stop.html func New(tp elastictransport.Interface) *Stop { @@ -278,6 +294,8 @@ func (r *Stop) Header(key, value string) *Stop { return r } +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. // API name: master_timeout func (r *Stop) MasterTimeout(duration string) *Stop { r.values.Set("master_timeout", duration) @@ -285,6 +303,8 @@ func (r *Stop) MasterTimeout(duration string) *Stop { return r } +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. // API name: timeout func (r *Stop) Timeout(duration string) *Stop { r.values.Set("timeout", duration) diff --git a/typedapi/indices/addblock/add_block.go b/typedapi/indices/addblock/add_block.go index 2977729907..004bb39dd1 100644 --- a/typedapi/indices/addblock/add_block.go +++ b/typedapi/indices/addblock/add_block.go @@ -16,11 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Add an index block. -// Limits the operations allowed on an index by blocking specific operation -// types. +// +// Add an index block to an index. +// Index blocks limit the operations allowed on an index by blocking specific +// operation types. package addblock import ( @@ -85,10 +87,12 @@ func NewAddBlockFunc(tp elastictransport.Interface) NewAddBlock { } // Add an index block. -// Limits the operations allowed on an index by blocking specific operation -// types. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-blocks.html +// Add an index block to an index. +// Index blocks limit the operations allowed on an index by blocking specific +// operation types. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-blocks.html#add-index-block func New(tp elastictransport.Interface) *AddBlock { r := &AddBlock{ transport: tp, @@ -304,7 +308,14 @@ func (r *AddBlock) Header(key, value string) *AddBlock { return r } -// Index A comma separated list of indices to add a block to +// Index A comma-separated list or wildcard expression of index names used to limit +// the request. +// By default, you must explicitly name the indices you are adding blocks to. +// To allow the adding of blocks to indices with `_all`, `*`, or other wildcard +// expressions, change the `action.destructive_requires_name` setting to +// `false`. +// You can update this setting in the `elasticsearch.yml` file or by using the +// cluster update settings API. 
// API Name: index func (r *AddBlock) _index(index string) *AddBlock { r.paramSet |= indexMask @@ -313,7 +324,7 @@ func (r *AddBlock) _index(index string) *AddBlock { return r } -// Block The block to add (one of read, write, read_only or metadata) +// Block The block type to add to the index. // API Name: block func (r *AddBlock) _block(block string) *AddBlock { r.paramSet |= blockMask @@ -322,8 +333,11 @@ func (r *AddBlock) _block(block string) *AddBlock { return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. // API name: allow_no_indices func (r *AddBlock) AllowNoIndices(allownoindices bool) *AddBlock { r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) @@ -331,8 +345,10 @@ func (r *AddBlock) AllowNoIndices(allownoindices bool) *AddBlock { return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards The type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// It supports comma-separated values, such as `open,hidden`. 
// API name: expand_wildcards func (r *AddBlock) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *AddBlock { tmp := []string{} @@ -344,8 +360,8 @@ func (r *AddBlock) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildc return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable func (r *AddBlock) IgnoreUnavailable(ignoreunavailable bool) *AddBlock { r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) @@ -353,7 +369,10 @@ func (r *AddBlock) IgnoreUnavailable(ignoreunavailable bool) *AddBlock { return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. // API name: master_timeout func (r *AddBlock) MasterTimeout(duration string) *AddBlock { r.values.Set("master_timeout", duration) @@ -361,7 +380,12 @@ func (r *AddBlock) MasterTimeout(duration string) *AddBlock { return r } -// Timeout Explicit operation timeout +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. +// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response will indicate that it was not +// completely acknowledged. +// It can also be set to `-1` to indicate that the request should never timeout. 
// API name: timeout func (r *AddBlock) Timeout(duration string) *AddBlock { r.values.Set("timeout", duration) diff --git a/typedapi/indices/addblock/response.go b/typedapi/indices/addblock/response.go index 31bfd899d5..513f044ded 100644 --- a/typedapi/indices/addblock/response.go +++ b/typedapi/indices/addblock/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package addblock @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package addblock // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/add_block/IndicesAddBlockResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/add_block/IndicesAddBlockResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` Indices []types.IndicesBlockStatus `json:"indices"` diff --git a/typedapi/indices/analyze/analyze.go b/typedapi/indices/analyze/analyze.go index 0b775961f0..aef8abbf00 100644 --- a/typedapi/indices/analyze/analyze.go +++ b/typedapi/indices/analyze/analyze.go @@ -16,9 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Performs analysis on a text string and returns the resulting tokens. +// Get tokens from text analysis. +// The analyze API performs analysis on a text string and returns the resulting +// tokens. 
+// +// Generating excessive amount of tokens may cause a node to run out of memory. +// The `index.analyze.max_token_count` setting enables you to limit the number +// of tokens that can be produced. +// If more than this limit of tokens gets generated, an error occurs. +// The `_analyze` endpoint without a specified index will always use `10000` as +// its limit. package analyze import ( @@ -79,7 +88,16 @@ func NewAnalyzeFunc(tp elastictransport.Interface) NewAnalyze { } } -// Performs analysis on a text string and returns the resulting tokens. +// Get tokens from text analysis. +// The analyze API performs analysis on a text string and returns the resulting +// tokens. +// +// Generating excessive amount of tokens may cause a node to run out of memory. +// The `index.analyze.max_token_count` setting enables you to limit the number +// of tokens that can be produced. +// If more than this limit of tokens gets generated, an error occurs. +// The `_analyze` endpoint without a specified index will always use `10000` as +// its limit. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-analyze.html func New(tp elastictransport.Interface) *Analyze { @@ -89,8 +107,6 @@ func New(tp elastictransport.Interface) *Analyze { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -363,83 +379,132 @@ func (r *Analyze) Pretty(pretty bool) *Analyze { return r } -// Analyzer The name of the analyzer that should be applied to the provided `text`. +// The name of the analyzer that should be applied to the provided `text`. // This could be a built-in analyzer, or an analyzer that’s been configured in // the index. 
// API name: analyzer func (r *Analyze) Analyzer(analyzer string) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Analyzer = &analyzer return r } -// Attributes Array of token attributes used to filter the output of the `explain` +// Array of token attributes used to filter the output of the `explain` // parameter. // API name: attributes func (r *Analyze) Attributes(attributes ...string) *Analyze { - r.req.Attributes = attributes + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range attributes { + + r.req.Attributes = append(r.req.Attributes, v) + } return r } -// CharFilter Array of character filters used to preprocess characters before the +// Array of character filters used to preprocess characters before the // tokenizer. // API name: char_filter -func (r *Analyze) CharFilter(charfilters ...types.CharFilter) *Analyze { - r.req.CharFilter = charfilters +func (r *Analyze) CharFilter(charfilters ...types.CharFilterVariant) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range charfilters { + + r.req.CharFilter = append(r.req.CharFilter, *v.CharFilterCaster()) + } return r } -// Explain If `true`, the response includes token attributes and additional details. +// If `true`, the response includes token attributes and additional details. // API name: explain func (r *Analyze) Explain(explain bool) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Explain = &explain return r } -// Field Field used to derive the analyzer. +// Field used to derive the analyzer. // To use this parameter, you must specify an index. // If specified, the `analyzer` parameter overrides this value. 
// API name: field func (r *Analyze) Field(field string) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Field = &field return r } -// Filter Array of token filters used to apply after the tokenizer. +// Array of token filters used to apply after the tokenizer. // API name: filter -func (r *Analyze) Filter(filters ...types.TokenFilter) *Analyze { - r.req.Filter = filters +func (r *Analyze) Filter(filters ...types.TokenFilterVariant) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range filters { + + r.req.Filter = append(r.req.Filter, *v.TokenFilterCaster()) + } return r } -// Normalizer Normalizer to use to convert text into a single token. +// Normalizer to use to convert text into a single token. // API name: normalizer func (r *Analyze) Normalizer(normalizer string) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Normalizer = &normalizer return r } -// Text Text to analyze. +// Text to analyze. // If an array of strings is provided, it is analyzed as a multi-value field. // API name: text func (r *Analyze) Text(texttoanalyzes ...string) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Text = texttoanalyzes return r } -// Tokenizer Tokenizer to use to convert text into tokens. +// Tokenizer to use to convert text into tokens. 
// API name: tokenizer -func (r *Analyze) Tokenizer(tokenizer types.Tokenizer) *Analyze { - r.req.Tokenizer = tokenizer +func (r *Analyze) Tokenizer(tokenizer types.TokenizerVariant) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Tokenizer = *tokenizer.TokenizerCaster() return r } diff --git a/typedapi/indices/analyze/request.go b/typedapi/indices/analyze/request.go index b3d1adedd3..5b032dc6be 100644 --- a/typedapi/indices/analyze/request.go +++ b/typedapi/indices/analyze/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package analyze @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package analyze // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/analyze/IndicesAnalyzeRequest.ts#L27-L92 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/analyze/IndicesAnalyzeRequest.ts#L27-L110 type Request struct { // Analyzer The name of the analyzer that should be applied to the provided `text`. 
@@ -130,37 +130,37 @@ func (s *Request) UnmarshalJSON(data []byte) error { case "html_strip": o := types.NewHtmlStripCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "html_strip", err) } s.CharFilter = append(s.CharFilter, *o) case "mapping": o := types.NewMappingCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "mapping", err) } s.CharFilter = append(s.CharFilter, *o) case "pattern_replace": o := types.NewPatternReplaceCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern_replace", err) } s.CharFilter = append(s.CharFilter, *o) case "icu_normalizer": o := types.NewIcuNormalizationCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_normalizer", err) } s.CharFilter = append(s.CharFilter, *o) case "kuromoji_iteration_mark": o := types.NewKuromojiIterationMarkCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_iteration_mark", err) } s.CharFilter = append(s.CharFilter, *o) default: o := new(any) if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("CharFilter | %w", err) } s.CharFilter = append(s.CharFilter, *o) } @@ -202,289 +202,289 @@ func (s *Request) UnmarshalJSON(data []byte) error { case "asciifolding": o := types.NewAsciiFoldingTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "asciifolding", err) } s.Filter = append(s.Filter, *o) case "common_grams": o := types.NewCommonGramsTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "common_grams", err) } s.Filter = append(s.Filter, *o) case "condition": o := types.NewConditionTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "condition", err) } s.Filter = append(s.Filter, 
*o) case "delimited_payload": o := types.NewDelimitedPayloadTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "delimited_payload", err) } s.Filter = append(s.Filter, *o) case "edge_ngram": o := types.NewEdgeNGramTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "edge_ngram", err) } s.Filter = append(s.Filter, *o) case "elision": o := types.NewElisionTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "elision", err) } s.Filter = append(s.Filter, *o) case "fingerprint": o := types.NewFingerprintTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "fingerprint", err) } s.Filter = append(s.Filter, *o) case "hunspell": o := types.NewHunspellTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "hunspell", err) } s.Filter = append(s.Filter, *o) case "hyphenation_decompounder": o := types.NewHyphenationDecompounderTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "hyphenation_decompounder", err) } s.Filter = append(s.Filter, *o) case "keep_types": o := types.NewKeepTypesTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keep_types", err) } s.Filter = append(s.Filter, *o) case "keep": o := types.NewKeepWordsTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keep", err) } s.Filter = append(s.Filter, *o) case "keyword_marker": o := types.NewKeywordMarkerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keyword_marker", err) } s.Filter = append(s.Filter, *o) case "kstem": o := types.NewKStemTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kstem", err) } s.Filter = 
append(s.Filter, *o) case "length": o := types.NewLengthTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "length", err) } s.Filter = append(s.Filter, *o) case "limit": o := types.NewLimitTokenCountTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "limit", err) } s.Filter = append(s.Filter, *o) case "lowercase": o := types.NewLowercaseTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "lowercase", err) } s.Filter = append(s.Filter, *o) case "multiplexer": o := types.NewMultiplexerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "multiplexer", err) } s.Filter = append(s.Filter, *o) case "ngram": o := types.NewNGramTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ngram", err) } s.Filter = append(s.Filter, *o) case "nori_part_of_speech": o := types.NewNoriPartOfSpeechTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "nori_part_of_speech", err) } s.Filter = append(s.Filter, *o) case "pattern_capture": o := types.NewPatternCaptureTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern_capture", err) } s.Filter = append(s.Filter, *o) case "pattern_replace": o := types.NewPatternReplaceTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern_replace", err) } s.Filter = append(s.Filter, *o) case "porter_stem": o := types.NewPorterStemTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "porter_stem", err) } s.Filter = append(s.Filter, *o) case "predicate_token_filter": o := types.NewPredicateTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", 
"predicate_token_filter", err) } s.Filter = append(s.Filter, *o) case "remove_duplicates": o := types.NewRemoveDuplicatesTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "remove_duplicates", err) } s.Filter = append(s.Filter, *o) case "reverse": o := types.NewReverseTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "reverse", err) } s.Filter = append(s.Filter, *o) case "shingle": o := types.NewShingleTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "shingle", err) } s.Filter = append(s.Filter, *o) case "snowball": o := types.NewSnowballTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "snowball", err) } s.Filter = append(s.Filter, *o) case "stemmer_override": o := types.NewStemmerOverrideTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "stemmer_override", err) } s.Filter = append(s.Filter, *o) case "stemmer": o := types.NewStemmerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "stemmer", err) } s.Filter = append(s.Filter, *o) case "stop": o := types.NewStopTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "stop", err) } s.Filter = append(s.Filter, *o) case "synonym_graph": o := types.NewSynonymGraphTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "synonym_graph", err) } s.Filter = append(s.Filter, *o) case "synonym": o := types.NewSynonymTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "synonym", err) } s.Filter = append(s.Filter, *o) case "trim": o := types.NewTrimTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "trim", err) } s.Filter = 
append(s.Filter, *o) case "truncate": o := types.NewTruncateTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "truncate", err) } s.Filter = append(s.Filter, *o) case "unique": o := types.NewUniqueTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "unique", err) } s.Filter = append(s.Filter, *o) case "uppercase": o := types.NewUppercaseTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "uppercase", err) } s.Filter = append(s.Filter, *o) case "word_delimiter_graph": o := types.NewWordDelimiterGraphTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "word_delimiter_graph", err) } s.Filter = append(s.Filter, *o) case "word_delimiter": o := types.NewWordDelimiterTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "word_delimiter", err) } s.Filter = append(s.Filter, *o) case "kuromoji_stemmer": o := types.NewKuromojiStemmerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_stemmer", err) } s.Filter = append(s.Filter, *o) case "kuromoji_readingform": o := types.NewKuromojiReadingFormTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_readingform", err) } s.Filter = append(s.Filter, *o) case "kuromoji_part_of_speech": o := types.NewKuromojiPartOfSpeechTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_part_of_speech", err) } s.Filter = append(s.Filter, *o) case "icu_collation": o := types.NewIcuCollationTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_collation", err) } s.Filter = append(s.Filter, *o) case "icu_folding": o := types.NewIcuFoldingTokenFilter() if err := localDec.Decode(&o); 
err != nil { - return err + return fmt.Errorf("%s | %w", "icu_folding", err) } s.Filter = append(s.Filter, *o) case "icu_normalizer": o := types.NewIcuNormalizationTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_normalizer", err) } s.Filter = append(s.Filter, *o) case "icu_transform": o := types.NewIcuTransformTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_transform", err) } s.Filter = append(s.Filter, *o) case "phonetic": o := types.NewPhoneticTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "phonetic", err) } s.Filter = append(s.Filter, *o) case "dictionary_decompounder": o := types.NewDictionaryDecompounderTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "dictionary_decompounder", err) } s.Filter = append(s.Filter, *o) default: o := new(any) if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter = append(s.Filter, *o) } @@ -533,90 +533,114 @@ func (s *Request) UnmarshalJSON(data []byte) error { case "char_group": o := types.NewCharGroupTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "char_group", err) + } + s.Tokenizer = *o + case "classic": + o := types.NewClassicTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "classic", err) } s.Tokenizer = *o case "edge_ngram": o := types.NewEdgeNGramTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "edge_ngram", err) } s.Tokenizer = *o case "keyword": o := types.NewKeywordTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keyword", err) } s.Tokenizer = *o case "letter": o := types.NewLetterTokenizer() if err := localDec.Decode(&o); err != nil { - return err + 
return fmt.Errorf("%s | %w", "letter", err) } s.Tokenizer = *o case "lowercase": o := types.NewLowercaseTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "lowercase", err) } s.Tokenizer = *o case "ngram": o := types.NewNGramTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ngram", err) } s.Tokenizer = *o - case "nori_tokenizer": - o := types.NewNoriTokenizer() + case "path_hierarchy": + o := types.NewPathHierarchyTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "path_hierarchy", err) } s.Tokenizer = *o - case "path_hierarchy": - o := types.NewPathHierarchyTokenizer() + case "pattern": + o := types.NewPatternTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "pattern", err) + } + s.Tokenizer = *o + case "simple_pattern": + o := types.NewSimplePatternTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "simple_pattern", err) + } + s.Tokenizer = *o + case "simple_pattern_split": + o := types.NewSimplePatternSplitTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "simple_pattern_split", err) } s.Tokenizer = *o case "standard": o := types.NewStandardTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "standard", err) + } + s.Tokenizer = *o + case "thai": + o := types.NewThaiTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "thai", err) } s.Tokenizer = *o case "uax_url_email": o := types.NewUaxEmailUrlTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "uax_url_email", err) } s.Tokenizer = *o case "whitespace": o := types.NewWhitespaceTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "whitespace", err) } s.Tokenizer = 
*o - case "kuromoji_tokenizer": - o := types.NewKuromojiTokenizer() + case "icu_tokenizer": + o := types.NewIcuTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_tokenizer", err) } s.Tokenizer = *o - case "pattern": - o := types.NewPatternTokenizer() + case "kuromoji_tokenizer": + o := types.NewKuromojiTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_tokenizer", err) } s.Tokenizer = *o - case "icu_tokenizer": - o := types.NewIcuTokenizer() + case "nori_tokenizer": + o := types.NewNoriTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "nori_tokenizer", err) } s.Tokenizer = *o default: if err := localDec.Decode(&s.Tokenizer); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } } diff --git a/typedapi/indices/analyze/response.go b/typedapi/indices/analyze/response.go index 3b9a354295..c93dbf17ed 100644 --- a/typedapi/indices/analyze/response.go +++ b/typedapi/indices/analyze/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package analyze @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package analyze // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/analyze/IndicesAnalyzeResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/analyze/IndicesAnalyzeResponse.ts#L22-L27 type Response struct { Detail *types.AnalyzeDetail `json:"detail,omitempty"` Tokens []types.AnalyzeToken `json:"tokens,omitempty"` diff --git a/typedapi/indices/cancelmigratereindex/cancel_migrate_reindex.go b/typedapi/indices/cancelmigratereindex/cancel_migrate_reindex.go new file mode 100644 index 0000000000..6efe88740f --- /dev/null +++ b/typedapi/indices/cancelmigratereindex/cancel_migrate_reindex.go @@ -0,0 +1,356 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Cancel a migration reindex operation. +// +// Cancel a migration reindex attempt for a data stream or index. +package cancelmigratereindex + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CancelMigrateReindex struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCancelMigrateReindex type alias for index. +type NewCancelMigrateReindex func(index string) *CancelMigrateReindex + +// NewCancelMigrateReindexFunc returns a new instance of CancelMigrateReindex with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCancelMigrateReindexFunc(tp elastictransport.Interface) NewCancelMigrateReindex { + return func(index string) *CancelMigrateReindex { + n := New(tp) + + n._index(index) + + return n + } +} + +// Cancel a migration reindex operation. +// +// Cancel a migration reindex attempt for a data stream or index. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html +func New(tp elastictransport.Interface) *CancelMigrateReindex { + r := &CancelMigrateReindex{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *CancelMigrateReindex) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_cancel") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided 
transport and returns an http.Response. +func (r CancelMigrateReindex) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.cancel_migrate_reindex") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.cancel_migrate_reindex") + if reader := instrument.RecordRequestBody(ctx, "indices.cancel_migrate_reindex", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.cancel_migrate_reindex") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CancelMigrateReindex query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a cancelmigratereindex.Response +func (r CancelMigrateReindex) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.cancel_migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r CancelMigrateReindex) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.cancel_migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the CancelMigrateReindex query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the CancelMigrateReindex headers map. +func (r *CancelMigrateReindex) Header(key, value string) *CancelMigrateReindex { + r.headers.Set(key, value) + + return r +} + +// Index The index or data stream name +// API Name: index +func (r *CancelMigrateReindex) _index(index string) *CancelMigrateReindex { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *CancelMigrateReindex) ErrorTrace(errortrace bool) *CancelMigrateReindex { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *CancelMigrateReindex) FilterPath(filterpaths ...string) *CancelMigrateReindex { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *CancelMigrateReindex) Human(human bool) *CancelMigrateReindex { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *CancelMigrateReindex) Pretty(pretty bool) *CancelMigrateReindex { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/indices/cancelmigratereindex/response.go b/typedapi/indices/cancelmigratereindex/response.go new file mode 100644 index 0000000000..c7ab78aee7 --- /dev/null +++ b/typedapi/indices/cancelmigratereindex/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package cancelmigratereindex + +// Response holds the response body struct for the package cancelmigratereindex +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/cancel_migrate_reindex/MigrateCancelReindexResponse.ts#L22-L24 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/indices/clearcache/clear_cache.go b/typedapi/indices/clearcache/clear_cache.go index 6a66336e31..51842ff654 100644 --- a/typedapi/indices/clearcache/clear_cache.go +++ b/typedapi/indices/clearcache/clear_cache.go @@ -16,10 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Clears the caches of one or more indices. -// For data streams, the API clears the caches of the stream’s backing indices. +// Clear the cache. +// Clear the cache of one or more indices. 
+// For data streams, the API clears the caches of the stream's backing indices. +// +// By default, the clear cache API clears all caches. +// To clear only specific caches, use the `fielddata`, `query`, or `request` +// parameters. +// To clear the cache only of specific fields, use the `fields` parameter. package clearcache import ( @@ -76,8 +82,14 @@ func NewClearCacheFunc(tp elastictransport.Interface) NewClearCache { } } -// Clears the caches of one or more indices. -// For data streams, the API clears the caches of the stream’s backing indices. +// Clear the cache. +// Clear the cache of one or more indices. +// For data streams, the API clears the caches of the stream's backing indices. +// +// By default, the clear cache API clears all caches. +// To clear only specific caches, use the `fielddata`, `query`, or `request` +// parameters. +// To clear the cache only of specific fields, use the `fields` parameter. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clearcache.html func New(tp elastictransport.Interface) *ClearCache { diff --git a/typedapi/indices/clearcache/response.go b/typedapi/indices/clearcache/response.go index c10caf0f3a..4fed84b01d 100644 --- a/typedapi/indices/clearcache/response.go +++ b/typedapi/indices/clearcache/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clearcache @@ -26,9 +26,9 @@ import ( // Response holds the response body struct for the package clearcache // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/clear_cache/IndicesClearCacheResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/clear_cache/IndicesClearCacheResponse.ts#L22-L24 type Response struct { - Shards_ types.ShardStatistics `json:"_shards"` + Shards_ *types.ShardStatistics `json:"_shards,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/indices/clone/clone.go b/typedapi/indices/clone/clone.go index 63dfa89796..19fef5612f 100644 --- a/typedapi/indices/clone/clone.go +++ b/typedapi/indices/clone/clone.go @@ -16,9 +16,79 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Clones an existing index. +// Clone an index. +// Clone an existing index into a new index. +// Each original primary shard is cloned into a new primary shard in the new +// index. +// +// IMPORTANT: Elasticsearch does not apply index templates to the resulting +// index. +// The API also does not copy index metadata from the original index. +// Index metadata includes aliases, index lifecycle management phase +// definitions, and cross-cluster replication (CCR) follower information. +// For example, if you clone a CCR follower index, the resulting clone will not +// be a follower index. 
+// +// The clone API copies most index settings from the source index to the +// resulting index, with the exception of `index.number_of_replicas` and +// `index.auto_expand_replicas`. +// To set the number of replicas in the resulting index, configure these +// settings in the clone request. +// +// Cloning works as follows: +// +// * First, it creates a new target index with the same definition as the source +// index. +// * Then it hard-links segments from the source index into the target index. If +// the file system does not support hard-linking, all segments are copied into +// the new index, which is a much more time consuming process. +// * Finally, it recovers the target index as though it were a closed index +// which had just been re-opened. +// +// IMPORTANT: Indices can only be cloned if they meet the following +// requirements: +// +// * The index must be marked as read-only and have a cluster health status of +// green. +// * The target index must not exist. +// * The source index must have the same number of primary shards as the target +// index. +// * The node handling the clone process must have sufficient free disk space to +// accommodate a second copy of the existing index. +// +// The current write index on a data stream cannot be cloned. +// In order to clone the current write index, the data stream must first be +// rolled over so that a new write index is created and then the previous write +// index can be cloned. +// +// NOTE: Mappings cannot be specified in the `_clone` request. The mappings of +// the source index will be used for the target index. +// +// **Monitor the cloning process** +// +// The cloning process can be monitored with the cat recovery API or the cluster +// health API can be used to wait until all primary shards have been allocated +// by setting the `wait_for_status` parameter to `yellow`. 
+// +// The `_clone` API returns as soon as the target index has been added to the +// cluster state, before any shards have been allocated. +// At this point, all shards are in the state unassigned. +// If, for any reason, the target index can't be allocated, its primary shard +// will remain unassigned until it can be allocated on that node. +// +// Once the primary shard is allocated, it moves to state initializing, and the +// clone process begins. +// When the clone operation completes, the shard will become active. +// At that point, Elasticsearch will try to allocate any replicas and may decide +// to relocate the primary shard to another node. +// +// **Wait for active shards** +// +// Because the clone operation creates a new index to clone the shards to, the +// wait for active shards setting on index creation applies to the clone index +// action as well. package clone import ( @@ -86,7 +156,77 @@ func NewCloneFunc(tp elastictransport.Interface) NewClone { } } -// Clones an existing index. +// Clone an index. +// Clone an existing index into a new index. +// Each original primary shard is cloned into a new primary shard in the new +// index. +// +// IMPORTANT: Elasticsearch does not apply index templates to the resulting +// index. +// The API also does not copy index metadata from the original index. +// Index metadata includes aliases, index lifecycle management phase +// definitions, and cross-cluster replication (CCR) follower information. +// For example, if you clone a CCR follower index, the resulting clone will not +// be a follower index. +// +// The clone API copies most index settings from the source index to the +// resulting index, with the exception of `index.number_of_replicas` and +// `index.auto_expand_replicas`. +// To set the number of replicas in the resulting index, configure these +// settings in the clone request. 
+// +// Cloning works as follows: +// +// * First, it creates a new target index with the same definition as the source +// index. +// * Then it hard-links segments from the source index into the target index. If +// the file system does not support hard-linking, all segments are copied into +// the new index, which is a much more time consuming process. +// * Finally, it recovers the target index as though it were a closed index +// which had just been re-opened. +// +// IMPORTANT: Indices can only be cloned if they meet the following +// requirements: +// +// * The index must be marked as read-only and have a cluster health status of +// green. +// * The target index must not exist. +// * The source index must have the same number of primary shards as the target +// index. +// * The node handling the clone process must have sufficient free disk space to +// accommodate a second copy of the existing index. +// +// The current write index on a data stream cannot be cloned. +// In order to clone the current write index, the data stream must first be +// rolled over so that a new write index is created and then the previous write +// index can be cloned. +// +// NOTE: Mappings cannot be specified in the `_clone` request. The mappings of +// the source index will be used for the target index. +// +// **Monitor the cloning process** +// +// The cloning process can be monitored with the cat recovery API or the cluster +// health API can be used to wait until all primary shards have been allocated +// by setting the `wait_for_status` parameter to `yellow`. +// +// The `_clone` API returns as soon as the target index has been added to the +// cluster state, before any shards have been allocated. +// At this point, all shards are in the state unassigned. +// If, for any reason, the target index can't be allocated, its primary shard +// will remain unassigned until it can be allocated on that node. 
+// +// Once the primary shard is allocated, it moves to state initializing, and the +// clone process begins. +// When the clone operation completes, the shard will become active. +// At that point, Elasticsearch will try to allocate any replicas and may decide +// to relocate the primary shard to another node. +// +// **Wait for active shards** +// +// Because the clone operation creates a new index to clone the shards to, the +// wait for active shards setting on index creation applies to the clone index +// action as well. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clone-index.html func New(tp elastictransport.Interface) *Clone { @@ -96,8 +236,6 @@ func New(tp elastictransport.Interface) *Clone { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -408,20 +546,62 @@ func (r *Clone) Pretty(pretty bool) *Clone { return r } -// Aliases Aliases for the resulting index. +// Aliases for the resulting index. // API name: aliases func (r *Clone) Aliases(aliases map[string]types.Alias) *Clone { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aliases = aliases + return r +} + +func (r *Clone) AddAlias(key string, value types.AliasVariant) *Clone { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Alias + if r.req.Aliases == nil { + r.req.Aliases = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + r.req.Aliases = tmp return r } -// Settings Configuration options for the target index. +// Configuration options for the target index. 
// API name: settings func (r *Clone) Settings(settings map[string]json.RawMessage) *Clone { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Settings = settings + return r +} + +func (r *Clone) AddSetting(key string, value json.RawMessage) *Clone { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Settings == nil { + r.req.Settings = make(map[string]json.RawMessage) + } else { + tmp = r.req.Settings + } + + tmp[key] = value + r.req.Settings = tmp return r } diff --git a/typedapi/indices/clone/request.go b/typedapi/indices/clone/request.go index e692bf1973..136446d1e1 100644 --- a/typedapi/indices/clone/request.go +++ b/typedapi/indices/clone/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clone @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package clone // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/clone/IndicesCloneRequest.ts#L27-L75 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/clone/IndicesCloneRequest.ts#L27-L127 type Request struct { // Aliases Aliases for the resulting index. diff --git a/typedapi/indices/clone/response.go b/typedapi/indices/clone/response.go index 1fd0cadf57..97bb92ea50 100644 --- a/typedapi/indices/clone/response.go +++ b/typedapi/indices/clone/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clone // Response holds the response body struct for the package clone // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/clone/IndicesCloneResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/clone/IndicesCloneResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` Index string `json:"index"` diff --git a/typedapi/indices/close/close.go b/typedapi/indices/close/close.go index 861e94e7c7..a7550c395e 100644 --- a/typedapi/indices/close/close.go +++ b/typedapi/indices/close/close.go @@ -16,9 +16,37 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Closes an index. +// Close an index. +// A closed index is blocked for read or write operations and does not allow all +// operations that opened indices allow. +// It is not possible to index documents or to search for documents in a closed +// index. +// Closed indices do not have to maintain internal data structures for indexing +// or searching documents, which results in a smaller overhead on the cluster. +// +// When opening or closing an index, the master node is responsible for +// restarting the index shards to reflect the new state of the index. +// The shards will then go through the normal recovery process. 
+// The data of opened and closed indices is automatically replicated by the +// cluster to ensure that enough shard copies are safely kept around at all +// times. +// +// You can open and close multiple indices. +// An error is thrown if the request explicitly refers to a missing index. +// This behaviour can be turned off using the `ignore_unavailable=true` +// parameter. +// +// By default, you must explicitly name the indices you are opening or closing. +// To open or close indices with `_all`, `*`, or other wildcard expressions, +// change the` action.destructive_requires_name` setting to `false`. This +// setting can also be changed with the cluster update settings API. +// +// Closed indices consume a significant amount of disk-space which can cause +// problems in managed environments. +// Closing indices can be turned off with the cluster settings API by setting +// `cluster.indices.close.enable` to `false`. package close import ( @@ -77,7 +105,35 @@ func NewCloseFunc(tp elastictransport.Interface) NewClose { } } -// Closes an index. +// Close an index. +// A closed index is blocked for read or write operations and does not allow all +// operations that opened indices allow. +// It is not possible to index documents or to search for documents in a closed +// index. +// Closed indices do not have to maintain internal data structures for indexing +// or searching documents, which results in a smaller overhead on the cluster. +// +// When opening or closing an index, the master node is responsible for +// restarting the index shards to reflect the new state of the index. +// The shards will then go through the normal recovery process. +// The data of opened and closed indices is automatically replicated by the +// cluster to ensure that enough shard copies are safely kept around at all +// times. +// +// You can open and close multiple indices. +// An error is thrown if the request explicitly refers to a missing index. 
+// This behaviour can be turned off using the `ignore_unavailable=true` +// parameter. +// +// By default, you must explicitly name the indices you are opening or closing. +// To open or close indices with `_all`, `*`, or other wildcard expressions, +// change the` action.destructive_requires_name` setting to `false`. This +// setting can also be changed with the cluster update settings API. +// +// Closed indices consume a significant amount of disk-space which can cause +// problems in managed environments. +// Closing indices can be turned off with the cluster settings API by setting +// `cluster.indices.close.enable` to `false`. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-close.html func New(tp elastictransport.Interface) *Close { diff --git a/typedapi/indices/close/response.go b/typedapi/indices/close/response.go index c097a73618..b41d9a0f9b 100644 --- a/typedapi/indices/close/response.go +++ b/typedapi/indices/close/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package close @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package close // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/close/CloseIndexResponse.ts#L24-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/close/CloseIndexResponse.ts#L24-L30 type Response struct { Acknowledged bool `json:"acknowledged"` Indices map[string]types.CloseIndexResult `json:"indices"` diff --git a/typedapi/indices/create/create.go b/typedapi/indices/create/create.go index 381e2e97e1..1ae6cfe474 100644 --- a/typedapi/indices/create/create.go +++ b/typedapi/indices/create/create.go @@ -16,10 +16,43 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Create an index. -// Creates a new index. +// You can use the create index API to add a new index to an Elasticsearch +// cluster. +// When creating an index, you can specify the following: +// +// * Settings for the index. +// * Mappings for fields in the index. +// * Index aliases +// +// **Wait for active shards** +// +// By default, index creation will only return a response to the client when the +// primary copies of each shard have been started, or the request times out. +// The index creation response will indicate what happened. 
+// For example, `acknowledged` indicates whether the index was successfully +// created in the cluster, `while shards_acknowledged` indicates whether the +// requisite number of shard copies were started for each shard in the index +// before timing out. +// Note that it is still possible for either `acknowledged` or +// `shards_acknowledged` to be `false`, but for the index creation to be +// successful. +// These values simply indicate whether the operation completed before the +// timeout. +// If `acknowledged` is false, the request timed out before the cluster state +// was updated with the newly created index, but it probably will be created +// sometime soon. +// If `shards_acknowledged` is false, then the request timed out before the +// requisite number of shards were started (by default just the primaries), even +// if the cluster state was successfully updated to reflect the newly created +// index (that is to say, `acknowledged` is `true`). +// +// You can change the default of only waiting for the primary shards to start +// through the index setting `index.write.wait_for_active_shards`. +// Note that changing this setting will also affect the `wait_for_active_shards` +// value on all subsequent write operations. package create import ( @@ -83,7 +116,40 @@ func NewCreateFunc(tp elastictransport.Interface) NewCreate { } // Create an index. -// Creates a new index. +// You can use the create index API to add a new index to an Elasticsearch +// cluster. +// When creating an index, you can specify the following: +// +// * Settings for the index. +// * Mappings for fields in the index. +// * Index aliases +// +// **Wait for active shards** +// +// By default, index creation will only return a response to the client when the +// primary copies of each shard have been started, or the request times out. +// The index creation response will indicate what happened. 
+// For example, `acknowledged` indicates whether the index was successfully +// created in the cluster, `while shards_acknowledged` indicates whether the +// requisite number of shard copies were started for each shard in the index +// before timing out. +// Note that it is still possible for either `acknowledged` or +// `shards_acknowledged` to be `false`, but for the index creation to be +// successful. +// These values simply indicate whether the operation completed before the +// timeout. +// If `acknowledged` is false, the request timed out before the cluster state +// was updated with the newly created index, but it probably will be created +// sometime soon. +// If `shards_acknowledged` is false, then the request timed out before the +// requisite number of shards were started (by default just the primaries), even +// if the cluster state was successfully updated to reflect the newly created +// index (that is to say, `acknowledged` is `true`). +// +// You can change the default of only waiting for the primary shards to start +// through the index setting `index.write.wait_for_active_shards`. +// Note that changing this setting will also affect the `wait_for_active_shards` +// value on all subsequent write operations. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html func New(tp elastictransport.Interface) *Create { @@ -93,8 +159,6 @@ func New(tp elastictransport.Interface) *Create { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -388,32 +452,61 @@ func (r *Create) Pretty(pretty bool) *Create { return r } -// Aliases Aliases for the index. +// Aliases for the index. 
// API name: aliases func (r *Create) Aliases(aliases map[string]types.Alias) *Create { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aliases = aliases + return r +} + +func (r *Create) AddAlias(key string, value types.AliasVariant) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Alias + if r.req.Aliases == nil { + r.req.Aliases = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + r.req.Aliases = tmp return r } -// Mappings Mapping for fields in the index. If specified, this mapping can include: +// Mapping for fields in the index. If specified, this mapping can include: // - Field names // - Field data types // - Mapping parameters // API name: mappings -func (r *Create) Mappings(mappings *types.TypeMapping) *Create { +func (r *Create) Mappings(mappings types.TypeMappingVariant) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Mappings = mappings + r.req.Mappings = mappings.TypeMappingCaster() return r } -// Settings Configuration options for the index. +// Configuration options for the index. // API name: settings -func (r *Create) Settings(settings *types.IndexSettings) *Create { +func (r *Create) Settings(settings types.IndexSettingsVariant) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Settings = settings + r.req.Settings = settings.IndexSettingsCaster() return r } diff --git a/typedapi/indices/create/request.go b/typedapi/indices/create/request.go index f9ed43dad6..0f095ff0b7 100644 --- a/typedapi/indices/create/request.go +++ b/typedapi/indices/create/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package create @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/create/IndicesCreateRequest.ts#L28-L82 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/create/IndicesCreateRequest.ts#L28-L108 type Request struct { // Aliases Aliases for the index. diff --git a/typedapi/indices/create/response.go b/typedapi/indices/create/response.go index 634dad9107..60fd23626a 100644 --- a/typedapi/indices/create/response.go +++ b/typedapi/indices/create/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package create // Response holds the response body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/create/IndicesCreateResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/create/IndicesCreateResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` Index string `json:"index"` diff --git a/typedapi/indices/createdatastream/create_data_stream.go b/typedapi/indices/createdatastream/create_data_stream.go index 3236f2c537..385e76c2f3 100644 --- a/typedapi/indices/createdatastream/create_data_stream.go +++ b/typedapi/indices/createdatastream/create_data_stream.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Create a data stream. -// Creates a data stream. +// // You must have a matching index template with data stream enabled. package createdatastream @@ -79,10 +79,10 @@ func NewCreateDataStreamFunc(tp elastictransport.Interface) NewCreateDataStream } // Create a data stream. -// Creates a data stream. +// // You must have a matching index template with data stream enabled. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-data-stream.html func New(tp elastictransport.Interface) *CreateDataStream { r := &CreateDataStream{ transport: tp, @@ -308,6 +308,24 @@ func (r *CreateDataStream) _name(name string) *CreateDataStream { return r } +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *CreateDataStream) MasterTimeout(duration string) *CreateDataStream { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *CreateDataStream) Timeout(duration string) *CreateDataStream { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/indices/createdatastream/response.go b/typedapi/indices/createdatastream/response.go index 430115995f..39edb179c4 100644 --- a/typedapi/indices/createdatastream/response.go +++ b/typedapi/indices/createdatastream/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package createdatastream // Response holds the response body struct for the package createdatastream // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/create_data_stream/IndicesCreateDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/create_data_stream/IndicesCreateDataStreamResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/createfrom/create_from.go b/typedapi/indices/createfrom/create_from.go new file mode 100644 index 0000000000..22c5cf17b8 --- /dev/null +++ b/typedapi/indices/createfrom/create_from.go @@ -0,0 +1,421 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Create an index from a source index. +// +// Copy the mappings and settings from the source index to a destination index +// while allowing request settings and mappings to override the source values. +package createfrom + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + sourceMask = iota + 1 + + destMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CreateFrom struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + source string + dest string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCreateFrom type alias for index. +type NewCreateFrom func(source, dest string) *CreateFrom + +// NewCreateFromFunc returns a new instance of CreateFrom with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCreateFromFunc(tp elastictransport.Interface) NewCreateFrom { + return func(source, dest string) *CreateFrom { + n := New(tp) + + n._source(source) + + n._dest(dest) + + return n + } +} + +// Create an index from a source index. +// +// Copy the mappings and settings from the source index to a destination index +// while allowing request settings and mappings to override the source values. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html +func New(tp elastictransport.Interface) *CreateFrom { + r := &CreateFrom{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *CreateFrom) Raw(raw io.Reader) *CreateFrom { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *CreateFrom) Request(req *Request) *CreateFrom { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *CreateFrom) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for CreateFrom: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == sourceMask|destMask: + path.WriteString("/") + path.WriteString("_create_from") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "source", r.source) + } + path.WriteString(r.source) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordPathPart(ctx, "dest", r.dest) + } + path.WriteString(r.dest) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r CreateFrom) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.create_from") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.create_from") + if reader := instrument.RecordRequestBody(ctx, "indices.create_from", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.create_from") + } + if err != nil { + localErr := fmt.Errorf("an error 
happened during the CreateFrom query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a createfrom.Response +func (r CreateFrom) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.create_from") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the CreateFrom headers map. 
+func (r *CreateFrom) Header(key, value string) *CreateFrom {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Source The source index or data stream name
+// API Name: source
+func (r *CreateFrom) _source(source string) *CreateFrom {
+	r.paramSet |= sourceMask
+	r.source = source
+
+	return r
+}
+
+// Dest The destination index or data stream name
+// API Name: dest
+func (r *CreateFrom) _dest(dest string) *CreateFrom {
+	r.paramSet |= destMask
+	r.dest = dest
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *CreateFrom) ErrorTrace(errortrace bool) *CreateFrom {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *CreateFrom) FilterPath(filterpaths ...string) *CreateFrom {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *CreateFrom) Human(human bool) *CreateFrom {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty +func (r *CreateFrom) Pretty(pretty bool) *CreateFrom { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Mappings overrides to be applied to the destination index (optional) +// API name: mappings_override +func (r *CreateFrom) MappingsOverride(mappingsoverride types.TypeMappingVariant) *CreateFrom { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MappingsOverride = mappingsoverride.TypeMappingCaster() + + return r +} + +// If index blocks should be removed when creating destination index (optional) +// API name: remove_index_blocks +func (r *CreateFrom) RemoveIndexBlocks(removeindexblocks bool) *CreateFrom { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RemoveIndexBlocks = &removeindexblocks + + return r +} + +// Settings overrides to be applied to the destination index (optional) +// API name: settings_override +func (r *CreateFrom) SettingsOverride(settingsoverride types.IndexSettingsVariant) *CreateFrom { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SettingsOverride = settingsoverride.IndexSettingsCaster() + + return r +} diff --git a/typedapi/indices/createfrom/request.go b/typedapi/indices/createfrom/request.go new file mode 100644 index 0000000000..858bbaf691 --- /dev/null +++ b/typedapi/indices/createfrom/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package createfrom + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package createfrom +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/create_from/MigrateCreateFromRequest.ts#L25-L44 +type Request = types.CreateFrom + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewCreateFrom() + + return r +} diff --git a/typedapi/indices/createfrom/response.go b/typedapi/indices/createfrom/response.go new file mode 100644 index 0000000000..4de56f06da --- /dev/null +++ b/typedapi/indices/createfrom/response.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package createfrom + +// Response holds the response body struct for the package createfrom +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/create_from/MigrateCreateFromResponse.ts#L22-L28 +type Response struct { + Acknowledged bool `json:"acknowledged"` + Index string `json:"index"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/indices/datastreamsstats/data_streams_stats.go b/typedapi/indices/datastreamsstats/data_streams_stats.go index 7a1d9e498a..b591ccdd04 100644 --- a/typedapi/indices/datastreamsstats/data_streams_stats.go +++ b/typedapi/indices/datastreamsstats/data_streams_stats.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get data stream stats. -// Retrieves statistics for one or more data streams. +// +// Get statistics for one or more data streams. 
package datastreamsstats import ( @@ -77,9 +78,10 @@ func NewDataStreamsStatsFunc(tp elastictransport.Interface) NewDataStreamsStats } // Get data stream stats. -// Retrieves statistics for one or more data streams. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// Get statistics for one or more data streams. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-stream-stats-api.html func New(tp elastictransport.Interface) *DataStreamsStats { r := &DataStreamsStats{ transport: tp, diff --git a/typedapi/indices/datastreamsstats/response.go b/typedapi/indices/datastreamsstats/response.go index aa933fe880..8f05eea998 100644 --- a/typedapi/indices/datastreamsstats/response.go +++ b/typedapi/indices/datastreamsstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package datastreamsstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package datastreamsstats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L25-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L25-L43 type Response struct { // BackingIndices Total number of backing indices for the selected data streams. diff --git a/typedapi/indices/delete/delete.go b/typedapi/indices/delete/delete.go index 30fc37e6cb..f3622626f9 100644 --- a/typedapi/indices/delete/delete.go +++ b/typedapi/indices/delete/delete.go @@ -16,10 +16,17 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete indices. -// Deletes one or more indices. +// Deleting an index deletes its documents, shards, and metadata. +// It does not delete related Kibana components, such as data views, +// visualizations, or dashboards. +// +// You cannot delete the current write index of a data stream. +// To delete the index, you must roll over the data stream so a new write index +// is created. +// You can then use the delete index API to delete the previous write index. package delete import ( @@ -79,7 +86,14 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } // Delete indices. -// Deletes one or more indices. +// Deleting an index deletes its documents, shards, and metadata. +// It does not delete related Kibana components, such as data views, +// visualizations, or dashboards. +// +// You cannot delete the current write index of a data stream. +// To delete the index, you must roll over the data stream so a new write index +// is created. +// You can then use the delete index API to delete the previous write index. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html func New(tp elastictransport.Interface) *Delete { diff --git a/typedapi/indices/delete/response.go b/typedapi/indices/delete/response.go index 6f5fa7a0fa..5c4fce7f7f 100644 --- a/typedapi/indices/delete/response.go +++ b/typedapi/indices/delete/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package delete @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/delete/IndicesDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/delete/IndicesDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/deletealias/delete_alias.go b/typedapi/indices/deletealias/delete_alias.go index def21f8d64..e88de1fb89 100644 --- a/typedapi/indices/deletealias/delete_alias.go +++ b/typedapi/indices/deletealias/delete_alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete an alias. // Removes a data stream or index from an alias. @@ -85,7 +85,7 @@ func NewDeleteAliasFunc(tp elastictransport.Interface) NewDeleteAlias { // Delete an alias. // Removes a data stream or index from an alias. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-alias.html func New(tp elastictransport.Interface) *DeleteAlias { r := &DeleteAlias{ transport: tp, diff --git a/typedapi/indices/deletealias/response.go b/typedapi/indices/deletealias/response.go index eef89e1885..f743aac7bb 100644 --- a/typedapi/indices/deletealias/response.go +++ b/typedapi/indices/deletealias/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletealias // Response holds the response body struct for the package deletealias // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/delete_alias/IndicesDeleteAliasResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/delete_alias/IndicesDeleteAliasResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go b/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go index e4903b8de6..ad61099234 100644 --- a/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go +++ b/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete data stream lifecycles. // Removes the data stream lifecycle from a data stream, rendering it not diff --git a/typedapi/indices/deletedatalifecycle/response.go b/typedapi/indices/deletedatalifecycle/response.go index 5c6e881bf0..ea8b14e680 100644 --- a/typedapi/indices/deletedatalifecycle/response.go +++ b/typedapi/indices/deletedatalifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletedatalifecycle // Response holds the response body struct for the package deletedatalifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/delete_data_lifecycle/IndicesDeleteDataLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/delete_data_lifecycle/IndicesDeleteDataLifecycleResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/deletedatastream/delete_data_stream.go b/typedapi/indices/deletedatastream/delete_data_stream.go index 6e5a8f9e92..3e6b7927b3 100644 --- a/typedapi/indices/deletedatastream/delete_data_stream.go +++ b/typedapi/indices/deletedatastream/delete_data_stream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete data streams. // Deletes one or more data streams and their backing indices. @@ -81,7 +81,7 @@ func NewDeleteDataStreamFunc(tp elastictransport.Interface) NewDeleteDataStream // Delete data streams. // Deletes one or more data streams and their backing indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-data-stream.html func New(tp elastictransport.Interface) *DeleteDataStream { r := &DeleteDataStream{ transport: tp, @@ -301,6 +301,15 @@ func (r *DeleteDataStream) _name(name string) *DeleteDataStream { return r } +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *DeleteDataStream) MasterTimeout(duration string) *DeleteDataStream { + r.values.Set("master_timeout", duration) + + return r +} + // ExpandWildcards Type of data stream that wildcard patterns can match. Supports // comma-separated values,such as `open,hidden`. // API name: expand_wildcards diff --git a/typedapi/indices/deletedatastream/response.go b/typedapi/indices/deletedatastream/response.go index f78cb5e1d8..80a0e8e73f 100644 --- a/typedapi/indices/deletedatastream/response.go +++ b/typedapi/indices/deletedatastream/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletedatastream // Response holds the response body struct for the package deletedatastream // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/delete_data_stream/IndicesDeleteDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/delete_data_stream/IndicesDeleteDataStreamResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/deleteindextemplate/delete_index_template.go b/typedapi/indices/deleteindextemplate/delete_index_template.go index a838886ab3..09fdaac554 100644 --- a/typedapi/indices/deleteindextemplate/delete_index_template.go +++ b/typedapi/indices/deleteindextemplate/delete_index_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete an index template. // The provided may contain multiple template names separated diff --git a/typedapi/indices/deleteindextemplate/response.go b/typedapi/indices/deleteindextemplate/response.go index 3f1bf99da6..c43b0bbcde 100644 --- a/typedapi/indices/deleteindextemplate/response.go +++ b/typedapi/indices/deleteindextemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleteindextemplate // Response holds the response body struct for the package deleteindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/delete_index_template/IndicesDeleteIndexTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/delete_index_template/IndicesDeleteIndexTemplateResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/deletetemplate/delete_template.go b/typedapi/indices/deletetemplate/delete_template.go index 4e3e149d2e..e5e624da61 100644 --- a/typedapi/indices/deletetemplate/delete_template.go +++ b/typedapi/indices/deletetemplate/delete_template.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes a legacy index template. +// Delete a legacy index template. package deletetemplate import ( @@ -76,7 +76,7 @@ func NewDeleteTemplateFunc(tp elastictransport.Interface) NewDeleteTemplate { } } -// Deletes a legacy index template. +// Delete a legacy index template. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-template-v1.html func New(tp elastictransport.Interface) *DeleteTemplate { diff --git a/typedapi/indices/deletetemplate/response.go b/typedapi/indices/deletetemplate/response.go index 46eddafd62..6150cc2b4c 100644 --- a/typedapi/indices/deletetemplate/response.go +++ b/typedapi/indices/deletetemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletetemplate // Response holds the response body struct for the package deletetemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/delete_template/IndicesDeleteTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/delete_template/IndicesDeleteTemplateResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/diskusage/disk_usage.go b/typedapi/indices/diskusage/disk_usage.go index edbac54cfb..e7c7604332 100644 --- a/typedapi/indices/diskusage/disk_usage.go +++ b/typedapi/indices/diskusage/disk_usage.go @@ -16,9 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Analyzes the disk usage of each field of an index or data stream. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Analyze the index disk usage. 
+// Analyze the disk usage of each field of an index or data stream. +// This API might not support indices created in previous Elasticsearch +// versions. +// The result of a small index can be inaccurate as some parts of an index might +// not be analyzed by the API. +// +// NOTE: The total size of fields of the analyzed shards of the index in the +// response is usually smaller than the index `store_size` value because some +// small metadata files are ignored and some parts of data files might not be +// scanned by the API. +// Since stored fields are stored together in a compressed format, the sizes of +// stored fields are also estimates and can be inaccurate. +// The stored size of the `_id` field is likely underestimated while the +// `_source` field is overestimated. package diskusage import ( @@ -77,7 +91,21 @@ func NewDiskUsageFunc(tp elastictransport.Interface) NewDiskUsage { } } -// Analyzes the disk usage of each field of an index or data stream. +// Analyze the index disk usage. +// Analyze the disk usage of each field of an index or data stream. +// This API might not support indices created in previous Elasticsearch +// versions. +// The result of a small index can be inaccurate as some parts of an index might +// not be analyzed by the API. +// +// NOTE: The total size of fields of the analyzed shards of the index in the +// response is usually smaller than the index `store_size` value because some +// small metadata files are ignored and some parts of data files might not be +// scanned by the API. +// Since stored fields are stored together in a compressed format, the sizes of +// stored fields are also estimates and can be inaccurate. +// The stored size of the `_id` field is likely underestimated while the +// `_source` field is overestimated. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-disk-usage.html func New(tp elastictransport.Interface) *DiskUsage { diff --git a/typedapi/indices/diskusage/response.go b/typedapi/indices/diskusage/response.go index 9b1ae4c481..6c63f024e3 100644 --- a/typedapi/indices/diskusage/response.go +++ b/typedapi/indices/diskusage/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package diskusage @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package diskusage // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/disk_usage/IndicesDiskUsageResponse.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/disk_usage/IndicesDiskUsageResponse.ts#L22-L25 type Response = json.RawMessage diff --git a/typedapi/indices/downsample/downsample.go b/typedapi/indices/downsample/downsample.go index bf8d36ca2b..5181e19d1c 100644 --- a/typedapi/indices/downsample/downsample.go +++ b/typedapi/indices/downsample/downsample.go @@ -16,11 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Aggregates a time series (TSDS) index and stores pre-computed statistical +// Downsample an index. 
+// Aggregate a time series (TSDS) index and store pre-computed statistical // summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric // field grouped by a configured time interval. +// For example, a TSDS index that contains metrics sampled every 10 seconds can +// be downsampled to an hourly index. +// All documents within an hour interval are summarized and stored as a single +// document in the downsample index. +// +// NOTE: Only indices in a time series data stream are supported. +// Neither field nor document level security can be defined on the source index. +// The source index must be read only (`index.blocks.write: true`). package downsample import ( @@ -88,9 +97,18 @@ func NewDownsampleFunc(tp elastictransport.Interface) NewDownsample { } } -// Aggregates a time series (TSDS) index and stores pre-computed statistical +// Downsample an index. +// Aggregate a time series (TSDS) index and store pre-computed statistical // summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric // field grouped by a configured time interval. +// For example, a TSDS index that contains metrics sampled every 10 seconds can +// be downsampled to an hourly index. +// All documents within an hour interval are summarized and stored as a single +// document in the downsample index. +// +// NOTE: Only indices in a time series data stream are supported. +// Neither field nor document level security can be defined on the source index. +// The source index must be read only (`index.blocks.write: true`). 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-downsample-data-stream.html func New(tp elastictransport.Interface) *Downsample { @@ -100,8 +118,6 @@ func New(tp elastictransport.Interface) *Downsample { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -381,9 +397,14 @@ func (r *Downsample) Pretty(pretty bool) *Downsample { return r } -// FixedInterval The interval at which to aggregate the original time series index. +// The interval at which to aggregate the original time series index. // API name: fixed_interval func (r *Downsample) FixedInterval(durationlarge string) *Downsample { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FixedInterval = durationlarge return r diff --git a/typedapi/indices/downsample/request.go b/typedapi/indices/downsample/request.go index 62efde8531..65efe851ab 100644 --- a/typedapi/indices/downsample/request.go +++ b/typedapi/indices/downsample/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package downsample @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package downsample // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/downsample/Request.ts#L24-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/downsample/Request.ts#L24-L58 type Request = types.DownsampleConfig // NewRequest returns a Request diff --git a/typedapi/indices/downsample/response.go b/typedapi/indices/downsample/response.go index 570a6bc035..1225ff6b24 100644 --- a/typedapi/indices/downsample/response.go +++ b/typedapi/indices/downsample/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package downsample @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package downsample // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/downsample/Response.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/downsample/Response.ts#L22-L25 type Response = json.RawMessage diff --git a/typedapi/indices/exists/exists.go b/typedapi/indices/exists/exists.go index b3c7d6b7bb..8b0f7bef11 100644 --- a/typedapi/indices/exists/exists.go +++ b/typedapi/indices/exists/exists.go @@ -16,10 +16,10 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Check indices. -// Checks if one or more indices, index aliases, or data streams exist. +// Check if one or more indices, index aliases, or data streams exist. package exists import ( @@ -77,7 +77,7 @@ func NewExistsFunc(tp elastictransport.Interface) NewExists { } // Check indices. -// Checks if one or more indices, index aliases, or data streams exist. +// Check if one or more indices, index aliases, or data streams exist. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html func New(tp elastictransport.Interface) *Exists { diff --git a/typedapi/indices/existsalias/exists_alias.go b/typedapi/indices/existsalias/exists_alias.go index 00f2ae3eef..697d3d391f 100644 --- a/typedapi/indices/existsalias/exists_alias.go +++ b/typedapi/indices/existsalias/exists_alias.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Check aliases. -// Checks if one or more data stream or index aliases exist. +// +// Check if one or more data stream or index aliases exist. package existsalias import ( @@ -80,9 +81,10 @@ func NewExistsAliasFunc(tp elastictransport.Interface) NewExistsAlias { } // Check aliases. -// Checks if one or more data stream or index aliases exist. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html +// Check if one or more data stream or index aliases exist. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-alias func New(tp elastictransport.Interface) *ExistsAlias { r := &ExistsAlias{ transport: tp, diff --git a/typedapi/indices/existsindextemplate/exists_index_template.go b/typedapi/indices/existsindextemplate/exists_index_template.go index fc34754241..36d84ac2d0 100644 --- a/typedapi/indices/existsindextemplate/exists_index_template.go +++ b/typedapi/indices/existsindextemplate/exists_index_template.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about whether a particular index template exists. +// Check index templates. +// +// Check whether index templates exist. package existsindextemplate import ( @@ -74,9 +76,11 @@ func NewExistsIndexTemplateFunc(tp elastictransport.Interface) NewExistsIndexTem } } -// Returns information about whether a particular index template exists. +// Check index templates. +// +// Check whether index templates exist. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html +// https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-index-template func New(tp elastictransport.Interface) *ExistsIndexTemplate { r := &ExistsIndexTemplate{ transport: tp, diff --git a/typedapi/indices/existstemplate/exists_template.go b/typedapi/indices/existstemplate/exists_template.go index be0696a70a..3eb9ae80d5 100644 --- a/typedapi/indices/existstemplate/exists_template.go +++ b/typedapi/indices/existstemplate/exists_template.go @@ -16,10 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Check existence of index templates. -// Returns information about whether a particular index template exists. +// Get information about whether index templates exist. +// Index templates define settings, mappings, and aliases that can be applied +// automatically to new indices. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. package existstemplate import ( @@ -76,7 +82,13 @@ func NewExistsTemplateFunc(tp elastictransport.Interface) NewExistsTemplate { } // Check existence of index templates. -// Returns information about whether a particular index template exists. +// Get information about whether index templates exist. +// Index templates define settings, mappings, and aliases that can be applied +// automatically to new indices. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-template-exists-v1.html func New(tp elastictransport.Interface) *ExistsTemplate { @@ -239,7 +251,8 @@ func (r *ExistsTemplate) Header(key, value string) *ExistsTemplate { return r } -// Name The comma separated names of the index templates +// Name A comma-separated list of index template names used to limit the request. +// Wildcard (`*`) expressions are supported. 
// API Name: name func (r *ExistsTemplate) _name(name string) *ExistsTemplate { r.paramSet |= nameMask @@ -248,7 +261,7 @@ func (r *ExistsTemplate) _name(name string) *ExistsTemplate { return r } -// FlatSettings Return settings in flat format (default: false) +// FlatSettings Indicates whether to use a flat format for the response. // API name: flat_settings func (r *ExistsTemplate) FlatSettings(flatsettings bool) *ExistsTemplate { r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) @@ -256,8 +269,7 @@ func (r *ExistsTemplate) FlatSettings(flatsettings bool) *ExistsTemplate { return r } -// Local Return local information, do not retrieve the state from master node -// (default: false) +// Local Indicates whether to get information from the local node only. // API name: local func (r *ExistsTemplate) Local(local bool) *ExistsTemplate { r.values.Set("local", strconv.FormatBool(local)) @@ -265,7 +277,10 @@ func (r *ExistsTemplate) Local(local bool) *ExistsTemplate { return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *ExistsTemplate) MasterTimeout(duration string) *ExistsTemplate { r.values.Set("master_timeout", duration) diff --git a/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go b/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go index 28bbd24606..3a924e3227 100644 --- a/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go +++ b/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get the status for a data stream lifecycle. -// Retrieves information about an index or data stream’s current data stream -// lifecycle status, such as time since index creation, time since rollover, the -// lifecycle configuration managing the index, or any errors encountered during -// lifecycle execution. +// Get information about an index or data stream's current data stream lifecycle +// status, such as time since index creation, time since rollover, the lifecycle +// configuration managing the index, or any errors encountered during lifecycle +// execution. package explaindatalifecycle import ( @@ -81,10 +81,10 @@ func NewExplainDataLifecycleFunc(tp elastictransport.Interface) NewExplainDataLi } // Get the status for a data stream lifecycle. -// Retrieves information about an index or data stream’s current data stream -// lifecycle status, such as time since index creation, time since rollover, the -// lifecycle configuration managing the index, or any errors encountered during -// lifecycle execution. +// Get information about an index or data stream's current data stream lifecycle +// status, such as time since index creation, time since rollover, the lifecycle +// configuration managing the index, or any errors encountered during lifecycle +// execution. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-explain-lifecycle.html func New(tp elastictransport.Interface) *ExplainDataLifecycle { diff --git a/typedapi/indices/explaindatalifecycle/response.go b/typedapi/indices/explaindatalifecycle/response.go index 504b563864..547952d544 100644 --- a/typedapi/indices/explaindatalifecycle/response.go +++ b/typedapi/indices/explaindatalifecycle/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package explaindatalifecycle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package explaindatalifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L25-L29 type Response struct { Indices map[string]types.DataStreamLifecycleExplain `json:"indices"` } diff --git a/typedapi/indices/fieldusagestats/field_usage_stats.go b/typedapi/indices/fieldusagestats/field_usage_stats.go index c1bb1a3303..3b231f1867 100644 --- a/typedapi/indices/fieldusagestats/field_usage_stats.go +++ b/typedapi/indices/fieldusagestats/field_usage_stats.go @@ -16,9 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Returns field usage information for each shard and field of an index. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Get field usage stats. +// Get field usage information for each shard and field of an index. +// Field usage statistics are automatically captured when queries are running on +// a cluster. +// A shard-level search request that accesses a given field, even if multiple +// times during that request, is counted as a single use. 
+// +// The response body reports the per-shard usage count of the data structures +// that back the fields in the index. +// A given request will increment each count by a maximum value of 1, even if +// the request accesses the same field multiple times. package fieldusagestats import ( @@ -77,7 +87,17 @@ func NewFieldUsageStatsFunc(tp elastictransport.Interface) NewFieldUsageStats { } } -// Returns field usage information for each shard and field of an index. +// Get field usage stats. +// Get field usage information for each shard and field of an index. +// Field usage statistics are automatically captured when queries are running on +// a cluster. +// A shard-level search request that accesses a given field, even if multiple +// times during that request, is counted as a single use. +// +// The response body reports the per-shard usage count of the data structures +// that back the fields in the index. +// A given request will increment each count by a maximum value of 1, even if +// the request accesses the same field multiple times. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/field-usage-stats.html func New(tp elastictransport.Interface) *FieldUsageStats { @@ -343,26 +363,6 @@ func (r *FieldUsageStats) Fields(fields ...string) *FieldUsageStats { return r } -// MasterTimeout Period to wait for a connection to the master node. -// If no response is received before the timeout expires, the request fails and -// returns an error. -// API name: master_timeout -func (r *FieldUsageStats) MasterTimeout(duration string) *FieldUsageStats { - r.values.Set("master_timeout", duration) - - return r -} - -// Timeout Period to wait for a response. -// If no response is received before the timeout expires, the request fails and -// returns an error. 
-// API name: timeout -func (r *FieldUsageStats) Timeout(duration string) *FieldUsageStats { - r.values.Set("timeout", duration) - - return r -} - // WaitForActiveShards The number of shard copies that must be active before proceeding with the // operation. // Set to all or any positive integer up to the total number of shards in the diff --git a/typedapi/indices/fieldusagestats/response.go b/typedapi/indices/fieldusagestats/response.go index 37931ae0f2..b4decf536e 100644 --- a/typedapi/indices/fieldusagestats/response.go +++ b/typedapi/indices/fieldusagestats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package fieldusagestats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package fieldusagestats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L28-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L28-L30 type Response struct { FieldsUsageBody map[string]types.UsageStatsIndex `json:"-"` Shards_ types.ShardStatistics `json:"_shards"` diff --git a/typedapi/indices/flush/flush.go b/typedapi/indices/flush/flush.go index 96997fc66a..0429602057 100644 --- a/typedapi/indices/flush/flush.go +++ b/typedapi/indices/flush/flush.go @@ -16,9 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Flushes one or more data streams or indices. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Flush data streams or indices. +// Flushing a data stream or index is the process of making sure that any data +// that is currently only stored in the transaction log is also permanently +// stored in the Lucene index. +// When restarting, Elasticsearch replays any unflushed operations from the +// transaction log into the Lucene index to bring it back into the state that it +// was in before the restart. +// Elasticsearch automatically triggers flushes as needed, using heuristics that +// trade off the size of the unflushed transaction log against the cost of +// performing each flush. +// +// After each operation has been flushed it is permanently stored in the Lucene +// index. +// This may mean that there is no need to maintain an additional copy of it in +// the transaction log. +// The transaction log is made up of multiple files, called generations, and +// Elasticsearch will delete any generation files when they are no longer +// needed, freeing up disk space. +// +// It is also possible to trigger a flush on one or more indices using the flush +// API, although it is rare for users to need to call this API directly. +// If you call the flush API after indexing some documents then a successful +// response indicates that Elasticsearch has flushed all the documents that were +// indexed before the flush API was called. package flush import ( @@ -75,7 +98,30 @@ func NewFlushFunc(tp elastictransport.Interface) NewFlush { } } -// Flushes one or more data streams or indices. +// Flush data streams or indices. +// Flushing a data stream or index is the process of making sure that any data +// that is currently only stored in the transaction log is also permanently +// stored in the Lucene index. 
+// When restarting, Elasticsearch replays any unflushed operations from the +// transaction log into the Lucene index to bring it back into the state that it +// was in before the restart. +// Elasticsearch automatically triggers flushes as needed, using heuristics that +// trade off the size of the unflushed transaction log against the cost of +// performing each flush. +// +// After each operation has been flushed it is permanently stored in the Lucene +// index. +// This may mean that there is no need to maintain an additional copy of it in +// the transaction log. +// The transaction log is made up of multiple files, called generations, and +// Elasticsearch will delete any generation files when they are no longer +// needed, freeing up disk space. +// +// It is also possible to trigger a flush on one or more indices using the flush +// API, although it is rare for users to need to call this API directly. +// If you call the flush API after indexing some documents then a successful +// response indicates that Elasticsearch has flushed all the documents that were +// indexed before the flush API was called. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html func New(tp elastictransport.Interface) *Flush { diff --git a/typedapi/indices/flush/response.go b/typedapi/indices/flush/response.go index c196f9a5ae..42e84f625c 100644 --- a/typedapi/indices/flush/response.go +++ b/typedapi/indices/flush/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package flush @@ -26,9 +26,9 @@ import ( // Response holds the response body struct for the package flush // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/flush/IndicesFlushResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/flush/IndicesFlushResponse.ts#L22-L24 type Response struct { - Shards_ types.ShardStatistics `json:"_shards"` + Shards_ *types.ShardStatistics `json:"_shards,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/indices/forcemerge/forcemerge.go b/typedapi/indices/forcemerge/forcemerge.go index 3f05ae06c8..69f74339f7 100644 --- a/typedapi/indices/forcemerge/forcemerge.go +++ b/typedapi/indices/forcemerge/forcemerge.go @@ -16,9 +16,89 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Performs the force merge operation on one or more indices. +// Force a merge. +// Perform the force merge operation on the shards of one or more indices. +// For data streams, the API forces a merge on the shards of the stream's +// backing indices. +// +// Merging reduces the number of segments in each shard by merging some of them +// together and also frees up the space used by deleted documents. +// Merging normally happens automatically, but sometimes it is useful to trigger +// a merge manually. +// +// WARNING: We recommend force merging only a read-only index (meaning the index +// is no longer receiving writes). 
+// When documents are updated or deleted, the old version is not immediately +// removed but instead soft-deleted and marked with a "tombstone". +// These soft-deleted documents are automatically cleaned up during regular +// segment merges. +// But force merge can cause very large (greater than 5 GB) segments to be +// produced, which are not eligible for regular merges. +// So the number of soft-deleted documents can then grow rapidly, resulting in +// higher disk usage and worse search performance. +// If you regularly force merge an index receiving writes, this can also make +// snapshots more expensive, since the new documents can't be backed up +// incrementally. +// +// **Blocks during a force merge** +// +// Calls to this API block until the merge is complete (unless request contains +// `wait_for_completion=false`). +// If the client connection is lost before completion then the force merge +// process will continue in the background. +// Any new requests to force merge the same indices will also block until the +// ongoing force merge is complete. +// +// **Running force merge asynchronously** +// +// If the request contains `wait_for_completion=false`, Elasticsearch performs +// some preflight checks, launches the request, and returns a task you can use +// to get the status of the task. +// However, you can not cancel this task as the force merge task is not +// cancelable. +// Elasticsearch creates a record of this task as a document at +// `_tasks/`. +// When you are done with a task, you should delete the task document so +// Elasticsearch can reclaim the space. +// +// **Force merging multiple indices** +// +// You can force merge multiple indices with a single request by targeting: +// +// * One or more data streams that contain multiple backing indices +// * Multiple indices +// * One or more aliases +// * All data streams and indices in a cluster +// +// Each targeted shard is force-merged separately using the force_merge +// threadpool. 
+// By default each node only has a single `force_merge` thread which means that +// the shards on that node are force-merged one at a time. +// If you expand the `force_merge` threadpool on a node then it will force merge +// its shards in parallel +// +// Force merge makes the storage for the shard being merged temporarily +// increase, as it may require free space up to triple its size in case +// `max_num_segments parameter` is set to `1`, to rewrite all segments into a +// new one. +// +// **Data streams and time-based indices** +// +// Force-merging is useful for managing a data stream's older backing indices +// and other time-based indices, particularly after a rollover. +// In these cases, each index only receives indexing traffic for a certain +// period of time. +// Once an index receive no more writes, its shards can be force-merged to a +// single segment. +// This can be a good idea because single-segment shards can sometimes use +// simpler and more efficient data structures to perform searches. +// For example: +// +// ``` +// POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 +// ``` package forcemerge import ( @@ -75,7 +155,87 @@ func NewForcemergeFunc(tp elastictransport.Interface) NewForcemerge { } } -// Performs the force merge operation on one or more indices. +// Force a merge. +// Perform the force merge operation on the shards of one or more indices. +// For data streams, the API forces a merge on the shards of the stream's +// backing indices. +// +// Merging reduces the number of segments in each shard by merging some of them +// together and also frees up the space used by deleted documents. +// Merging normally happens automatically, but sometimes it is useful to trigger +// a merge manually. +// +// WARNING: We recommend force merging only a read-only index (meaning the index +// is no longer receiving writes). 
+// When documents are updated or deleted, the old version is not immediately +// removed but instead soft-deleted and marked with a "tombstone". +// These soft-deleted documents are automatically cleaned up during regular +// segment merges. +// But force merge can cause very large (greater than 5 GB) segments to be +// produced, which are not eligible for regular merges. +// So the number of soft-deleted documents can then grow rapidly, resulting in +// higher disk usage and worse search performance. +// If you regularly force merge an index receiving writes, this can also make +// snapshots more expensive, since the new documents can't be backed up +// incrementally. +// +// **Blocks during a force merge** +// +// Calls to this API block until the merge is complete (unless request contains +// `wait_for_completion=false`). +// If the client connection is lost before completion then the force merge +// process will continue in the background. +// Any new requests to force merge the same indices will also block until the +// ongoing force merge is complete. +// +// **Running force merge asynchronously** +// +// If the request contains `wait_for_completion=false`, Elasticsearch performs +// some preflight checks, launches the request, and returns a task you can use +// to get the status of the task. +// However, you can not cancel this task as the force merge task is not +// cancelable. +// Elasticsearch creates a record of this task as a document at +// `_tasks/`. +// When you are done with a task, you should delete the task document so +// Elasticsearch can reclaim the space. +// +// **Force merging multiple indices** +// +// You can force merge multiple indices with a single request by targeting: +// +// * One or more data streams that contain multiple backing indices +// * Multiple indices +// * One or more aliases +// * All data streams and indices in a cluster +// +// Each targeted shard is force-merged separately using the force_merge +// threadpool. 
+// By default each node only has a single `force_merge` thread which means that +// the shards on that node are force-merged one at a time. +// If you expand the `force_merge` threadpool on a node then it will force merge +// its shards in parallel +// +// Force merge makes the storage for the shard being merged temporarily +// increase, as it may require free space up to triple its size in case +// `max_num_segments parameter` is set to `1`, to rewrite all segments into a +// new one. +// +// **Data streams and time-based indices** +// +// Force-merging is useful for managing a data stream's older backing indices +// and other time-based indices, particularly after a rollover. +// In these cases, each index only receives indexing traffic for a certain +// period of time. +// Once an index receive no more writes, its shards can be force-merged to a +// single segment. +// This can be a good idea because single-segment shards can sometimes use +// simpler and more efficient data structures to perform searches. +// For example: +// +// ``` +// POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 +// ``` // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html func New(tp elastictransport.Interface) *Forcemerge { diff --git a/typedapi/indices/forcemerge/response.go b/typedapi/indices/forcemerge/response.go index 227229c387..b183e6f70e 100644 --- a/typedapi/indices/forcemerge/response.go +++ b/typedapi/indices/forcemerge/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package forcemerge @@ -26,9 +26,9 @@ import ( // Response holds the response body struct for the package forcemerge // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/forcemerge/IndicesForceMergeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/forcemerge/IndicesForceMergeResponse.ts#L22-L24 type Response struct { - Shards_ types.ShardStatistics `json:"_shards"` + Shards_ *types.ShardStatistics `json:"_shards,omitempty"` // Task task contains a task id returned when wait_for_completion=false, // you can use the task_id to get the status of the task at _tasks/ Task *string `json:"task,omitempty"` diff --git a/typedapi/indices/get/get.go b/typedapi/indices/get/get.go index c8fd4c10d9..0959906288 100644 --- a/typedapi/indices/get/get.go +++ b/typedapi/indices/get/get.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get index information. -// Returns information about one or more indices. For data streams, the API -// returns information about the +// Get information about one or more indices. For data streams, the API returns +// information about the // stream’s backing indices. package get @@ -82,8 +82,8 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } // Get index information. -// Returns information about one or more indices. 
For data streams, the API -// returns information about the +// Get information about one or more indices. For data streams, the API returns +// information about the // stream’s backing indices. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html diff --git a/typedapi/indices/get/response.go b/typedapi/indices/get/response.go index 2d4f33e7f2..5da14e9bf4 100644 --- a/typedapi/indices/get/response.go +++ b/typedapi/indices/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get/IndicesGetResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get/IndicesGetResponse.ts#L24-L27 type Response map[string]types.IndexState diff --git a/typedapi/indices/getalias/get_alias.go b/typedapi/indices/getalias/get_alias.go index f76f209925..ce123abb5f 100644 --- a/typedapi/indices/getalias/get_alias.go +++ b/typedapi/indices/getalias/get_alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get aliases. // Retrieves information for one or more data stream or index aliases. @@ -83,7 +83,7 @@ func NewGetAliasFunc(tp elastictransport.Interface) NewGetAlias { // Get aliases. 
// Retrieves information for one or more data stream or index aliases. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-alias.html func New(tp elastictransport.Interface) *GetAlias { r := &GetAlias{ transport: tp, diff --git a/typedapi/indices/getalias/response.go b/typedapi/indices/getalias/response.go index ab79a8e3fb..4d0b37f102 100644 --- a/typedapi/indices/getalias/response.go +++ b/typedapi/indices/getalias/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getalias @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getalias // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get_alias/IndicesGetAliasResponse.ts#L26-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_alias/IndicesGetAliasResponse.ts#L26-L35 type Response map[string]types.IndexAliases diff --git a/typedapi/indices/getdatalifecycle/get_data_lifecycle.go b/typedapi/indices/getdatalifecycle/get_data_lifecycle.go index 30b5eda574..5f6820cf02 100644 --- a/typedapi/indices/getdatalifecycle/get_data_lifecycle.go +++ b/typedapi/indices/getdatalifecycle/get_data_lifecycle.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get data stream lifecycles. -// Retrieves the data stream lifecycle configuration of one or more data -// streams. +// +// Get the data stream lifecycle configuration of one or more data streams. package getdatalifecycle import ( @@ -80,8 +80,8 @@ func NewGetDataLifecycleFunc(tp elastictransport.Interface) NewGetDataLifecycle } // Get data stream lifecycles. -// Retrieves the data stream lifecycle configuration of one or more data -// streams. +// +// Get the data stream lifecycle configuration of one or more data streams. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-get-lifecycle.html func New(tp elastictransport.Interface) *GetDataLifecycle { @@ -328,6 +328,15 @@ func (r *GetDataLifecycle) IncludeDefaults(includedefaults bool) *GetDataLifecyc return r } +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *GetDataLifecycle) MasterTimeout(duration string) *GetDataLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/indices/getdatalifecycle/response.go b/typedapi/indices/getdatalifecycle/response.go index b2e25680cb..a2acc1c9ac 100644 --- a/typedapi/indices/getdatalifecycle/response.go +++ b/typedapi/indices/getdatalifecycle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getdatalifecycle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdatalifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L23-L25 type Response struct { DataStreams []types.DataStreamWithLifecycle `json:"data_streams"` } diff --git a/typedapi/indices/getdatalifecyclestats/get_data_lifecycle_stats.go b/typedapi/indices/getdatalifecyclestats/get_data_lifecycle_stats.go new file mode 100644 index 0000000000..01785de76b --- /dev/null +++ b/typedapi/indices/getdatalifecyclestats/get_data_lifecycle_stats.go @@ -0,0 +1,325 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Get data stream lifecycle stats. +// Get statistics about the data streams that are managed by a data stream +// lifecycle. +package getdatalifecyclestats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetDataLifecycleStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetDataLifecycleStats type alias for index. +type NewGetDataLifecycleStats func() *GetDataLifecycleStats + +// NewGetDataLifecycleStatsFunc returns a new instance of GetDataLifecycleStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetDataLifecycleStatsFunc(tp elastictransport.Interface) NewGetDataLifecycleStats { + return func() *GetDataLifecycleStats { + n := New(tp) + + return n + } +} + +// Get data stream lifecycle stats. +// Get statistics about the data streams that are managed by a data stream +// lifecycle. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-get-lifecycle-stats.html +func New(tp elastictransport.Interface) *GetDataLifecycleStats { + r := &GetDataLifecycleStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetDataLifecycleStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_lifecycle") + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetDataLifecycleStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_data_lifecycle_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_data_lifecycle_stats") + if reader := instrument.RecordRequestBody(ctx, "indices.get_data_lifecycle_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_lifecycle_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetDataLifecycleStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getdatalifecyclestats.Response +func (r GetDataLifecycleStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_lifecycle_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetDataLifecycleStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_lifecycle_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetDataLifecycleStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetDataLifecycleStats headers map. +func (r *GetDataLifecycleStats) Header(key, value string) *GetDataLifecycleStats { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetDataLifecycleStats) ErrorTrace(errortrace bool) *GetDataLifecycleStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetDataLifecycleStats) FilterPath(filterpaths ...string) *GetDataLifecycleStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetDataLifecycleStats) Human(human bool) *GetDataLifecycleStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetDataLifecycleStats) Pretty(pretty bool) *GetDataLifecycleStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/indices/getdatalifecyclestats/response.go b/typedapi/indices/getdatalifecyclestats/response.go new file mode 100644 index 0000000000..d0978f900f --- /dev/null +++ b/typedapi/indices/getdatalifecyclestats/response.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package getdatalifecyclestats + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package getdatalifecyclestats +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsResponse.ts#L24-L44 +type Response struct { + + // DataStreamCount The count of data streams currently being managed by the data stream + // lifecycle. + DataStreamCount int `json:"data_stream_count"` + // DataStreams Information about the data streams that are managed by the data stream + // lifecycle. + DataStreams []types.DataStreamStats `json:"data_streams"` + // LastRunDurationInMillis The duration of the last data stream lifecycle execution. + LastRunDurationInMillis *int64 `json:"last_run_duration_in_millis,omitempty"` + // TimeBetweenStartsInMillis The time that passed between the start of the last two data stream lifecycle + // executions. + // This value should amount approximately to + // `data_streams.lifecycle.poll_interval`. + TimeBetweenStartsInMillis *int64 `json:"time_between_starts_in_millis,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/indices/getdatastream/get_data_stream.go b/typedapi/indices/getdatastream/get_data_stream.go index bfc4a768d1..3fb6c249f5 100644 --- a/typedapi/indices/getdatastream/get_data_stream.go +++ b/typedapi/indices/getdatastream/get_data_stream.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get data streams. -// Retrieves information about one or more data streams. +// +// Get information about one or more data streams. package getdatastream import ( @@ -77,9 +78,10 @@ func NewGetDataStreamFunc(tp elastictransport.Interface) NewGetDataStream { } // Get data streams. -// Retrieves information about one or more data streams. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// Get information about one or more data streams. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-data-stream.html func New(tp elastictransport.Interface) *GetDataStream { r := &GetDataStream{ transport: tp, @@ -326,6 +328,24 @@ func (r *GetDataStream) IncludeDefaults(includedefaults bool) *GetDataStream { return r } +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *GetDataStream) MasterTimeout(duration string) *GetDataStream { + r.values.Set("master_timeout", duration) + + return r +} + +// Verbose Whether the maximum timestamp for each data stream should be calculated and +// returned. +// API name: verbose +func (r *GetDataStream) Verbose(verbose bool) *GetDataStream { + r.values.Set("verbose", strconv.FormatBool(verbose)) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace diff --git a/typedapi/indices/getdatastream/response.go b/typedapi/indices/getdatastream/response.go index 53d3dae5ad..fe04320022 100644 --- a/typedapi/indices/getdatastream/response.go +++ b/typedapi/indices/getdatastream/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getdatastream @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdatastream // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get_data_stream/IndicesGetDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_data_stream/IndicesGetDataStreamResponse.ts#L22-L24 type Response struct { DataStreams []types.DataStream `json:"data_streams"` } diff --git a/typedapi/indices/getfieldmapping/get_field_mapping.go b/typedapi/indices/getfieldmapping/get_field_mapping.go index 1d89bc7c54..617ea68f78 100644 --- a/typedapi/indices/getfieldmapping/get_field_mapping.go +++ b/typedapi/indices/getfieldmapping/get_field_mapping.go @@ -16,12 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get mapping definitions. // Retrieves mapping definitions for one or more fields. // For data streams, the API retrieves field mappings for the stream’s backing // indices. 
+// +// This API is useful if you don't need a complete mapping or if an index +// mapping contains a large number of fields. package getfieldmapping import ( @@ -88,6 +91,9 @@ func NewGetFieldMappingFunc(tp elastictransport.Interface) NewGetFieldMapping { // For data streams, the API retrieves field mappings for the stream’s backing // indices. // +// This API is useful if you don't need a complete mapping or if an index +// mapping contains a large number of fields. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-field-mapping.html func New(tp elastictransport.Interface) *GetFieldMapping { r := &GetFieldMapping{ @@ -321,6 +327,7 @@ func (r *GetFieldMapping) Header(key, value string) *GetFieldMapping { // Fields Comma-separated list or wildcard expression of fields used to limit returned // information. +// Supports wildcards (`*`). // API Name: fields func (r *GetFieldMapping) _fields(fields string) *GetFieldMapping { r.paramSet |= fieldsMask diff --git a/typedapi/indices/getfieldmapping/response.go b/typedapi/indices/getfieldmapping/response.go index 2a89ff546c..71e3be6d9b 100644 --- a/typedapi/indices/getfieldmapping/response.go +++ b/typedapi/indices/getfieldmapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getfieldmapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getfieldmapping // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get_field_mapping/IndicesGetFieldMappingResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_field_mapping/IndicesGetFieldMappingResponse.ts#L24-L27 type Response map[string]types.TypeFieldMappings diff --git a/typedapi/indices/getindextemplate/get_index_template.go b/typedapi/indices/getindextemplate/get_index_template.go index 599509007f..02d999090b 100644 --- a/typedapi/indices/getindextemplate/get_index_template.go +++ b/typedapi/indices/getindextemplate/get_index_template.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get index templates. -// Returns information about one or more index templates. +// Get information about one or more index templates. package getindextemplate import ( @@ -76,7 +76,7 @@ func NewGetIndexTemplateFunc(tp elastictransport.Interface) NewGetIndexTemplate } // Get index templates. -// Returns information about one or more index templates. +// Get information about one or more index templates. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-template.html func New(tp elastictransport.Interface) *GetIndexTemplate { diff --git a/typedapi/indices/getindextemplate/response.go b/typedapi/indices/getindextemplate/response.go index 5bedceb3ad..c7e81fa4cc 100644 --- a/typedapi/indices/getindextemplate/response.go +++ b/typedapi/indices/getindextemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getindextemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L23-L27 type Response struct { IndexTemplates []types.IndexTemplateItem `json:"index_templates"` } diff --git a/typedapi/indices/getmapping/get_mapping.go b/typedapi/indices/getmapping/get_mapping.go index 6d3c842e59..15c9679e5d 100644 --- a/typedapi/indices/getmapping/get_mapping.go +++ b/typedapi/indices/getmapping/get_mapping.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get mapping definitions. -// Retrieves mapping definitions for one or more indices. 
// For data streams, the API retrieves mappings for the stream’s backing // indices. package getmapping @@ -79,7 +78,6 @@ func NewGetMappingFunc(tp elastictransport.Interface) NewGetMapping { } // Get mapping definitions. -// Retrieves mapping definitions for one or more indices. // For data streams, the API retrieves mappings for the stream’s backing // indices. // diff --git a/typedapi/indices/getmapping/response.go b/typedapi/indices/getmapping/response.go index fd2ba1a939..e72a942d11 100644 --- a/typedapi/indices/getmapping/response.go +++ b/typedapi/indices/getmapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getmapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getmapping // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L24-L27 type Response map[string]types.IndexMappingRecord diff --git a/typedapi/indices/getmigratereindexstatus/get_migrate_reindex_status.go b/typedapi/indices/getmigratereindexstatus/get_migrate_reindex_status.go new file mode 100644 index 0000000000..904b1aae40 --- /dev/null +++ b/typedapi/indices/getmigratereindexstatus/get_migrate_reindex_status.go @@ -0,0 +1,356 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Get the migration reindexing status. +// +// Get the status of a migration reindex attempt for a data stream or index. +package getmigratereindexstatus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetMigrateReindexStatus struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetMigrateReindexStatus type alias for index. +type NewGetMigrateReindexStatus func(index string) *GetMigrateReindexStatus + +// NewGetMigrateReindexStatusFunc returns a new instance of GetMigrateReindexStatus with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewGetMigrateReindexStatusFunc(tp elastictransport.Interface) NewGetMigrateReindexStatus { + return func(index string) *GetMigrateReindexStatus { + n := New(tp) + + n._index(index) + + return n + } +} + +// Get the migration reindexing status. +// +// Get the status of a migration reindex attempt for a data stream or index. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html +func New(tp elastictransport.Interface) *GetMigrateReindexStatus { + r := &GetMigrateReindexStatus{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetMigrateReindexStatus) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_status") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", 
"application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetMigrateReindexStatus) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_migrate_reindex_status") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_migrate_reindex_status") + if reader := instrument.RecordRequestBody(ctx, "indices.get_migrate_reindex_status", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_migrate_reindex_status") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetMigrateReindexStatus query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getmigratereindexstatus.Response +func (r GetMigrateReindexStatus) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + 
r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_migrate_reindex_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetMigrateReindexStatus) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_migrate_reindex_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetMigrateReindexStatus query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetMigrateReindexStatus headers map. +func (r *GetMigrateReindexStatus) Header(key, value string) *GetMigrateReindexStatus { + r.headers.Set(key, value) + + return r +} + +// Index The index or data stream name. +// API Name: index +func (r *GetMigrateReindexStatus) _index(index string) *GetMigrateReindexStatus { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetMigrateReindexStatus) ErrorTrace(errortrace bool) *GetMigrateReindexStatus { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path
+func (r *GetMigrateReindexStatus) FilterPath(filterpaths ...string) *GetMigrateReindexStatus {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *GetMigrateReindexStatus) Human(human bool) *GetMigrateReindexStatus {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *GetMigrateReindexStatus) Pretty(pretty bool) *GetMigrateReindexStatus {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/typedapi/indices/getmigratereindexstatus/response.go b/typedapi/indices/getmigratereindexstatus/response.go
new file mode 100644
index 0000000000..ab74d0de31
--- /dev/null
+++ b/typedapi/indices/getmigratereindexstatus/response.go
@@ -0,0 +1,183 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package getmigratereindexstatus + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package getmigratereindexstatus +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_migrate_reindex_status/MigrateGetReindexStatusResponse.ts#L23-L36 +type Response struct { + Complete bool `json:"complete"` + Errors []types.StatusError `json:"errors"` + Exception *string `json:"exception,omitempty"` + InProgress []types.StatusInProgress `json:"in_progress"` + Pending int `json:"pending"` + StartTime types.DateTime `json:"start_time,omitempty"` + StartTimeMillis int64 `json:"start_time_millis"` + Successes int `json:"successes"` + TotalIndicesInDataStream int `json:"total_indices_in_data_stream"` + TotalIndicesRequiringUpgrade int `json:"total_indices_requiring_upgrade"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "complete": + var tmp any + dec.Decode(&tmp) + switch v 
:= tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Complete", err) + } + s.Complete = value + case bool: + s.Complete = v + } + + case "errors": + if err := dec.Decode(&s.Errors); err != nil { + return fmt.Errorf("%s | %w", "Errors", err) + } + + case "exception": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Exception", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Exception = &o + + case "in_progress": + if err := dec.Decode(&s.InProgress); err != nil { + return fmt.Errorf("%s | %w", "InProgress", err) + } + + case "pending": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Pending", err) + } + s.Pending = value + case float64: + f := int(v) + s.Pending = f + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return fmt.Errorf("%s | %w", "StartTime", err) + } + + case "start_time_millis": + if err := dec.Decode(&s.StartTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "StartTimeMillis", err) + } + + case "successes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Successes", err) + } + s.Successes = value + case float64: + f := int(v) + s.Successes = f + } + + case "total_indices_in_data_stream": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalIndicesInDataStream", err) + } + s.TotalIndicesInDataStream = value + case float64: + f := int(v) + s.TotalIndicesInDataStream = f + } + + case "total_indices_requiring_upgrade": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalIndicesRequiringUpgrade", err) + } + s.TotalIndicesRequiringUpgrade = value + case float64: + f := int(v) + s.TotalIndicesRequiringUpgrade = f + } + + } + } + return nil +} diff --git a/typedapi/indices/getsettings/get_settings.go b/typedapi/indices/getsettings/get_settings.go index 4607e3f65d..2cf0614674 100644 --- a/typedapi/indices/getsettings/get_settings.go +++ b/typedapi/indices/getsettings/get_settings.go @@ -16,11 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get index settings. -// Returns setting information for one or more indices. For data streams, -// returns setting information for the stream’s backing indices. +// Get setting information for one or more indices. +// For data streams, it returns setting information for the stream's backing +// indices. package getsettings import ( @@ -81,8 +82,9 @@ func NewGetSettingsFunc(tp elastictransport.Interface) NewGetSettings { } // Get index settings. -// Returns setting information for one or more indices. For data streams, -// returns setting information for the stream’s backing indices. +// Get setting information for one or more indices. +// For data streams, it returns setting information for the stream's backing +// indices. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html func New(tp elastictransport.Interface) *GetSettings { diff --git a/typedapi/indices/getsettings/response.go b/typedapi/indices/getsettings/response.go index 84d6048db6..4e94cdb4c0 100644 --- a/typedapi/indices/getsettings/response.go +++ b/typedapi/indices/getsettings/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getsettings @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getsettings // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get_settings/IndicesGetSettingsResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_settings/IndicesGetSettingsResponse.ts#L24-L27 type Response map[string]types.IndexState diff --git a/typedapi/indices/gettemplate/get_template.go b/typedapi/indices/gettemplate/get_template.go index 4a378ab19c..4da9354ec1 100644 --- a/typedapi/indices/gettemplate/get_template.go +++ b/typedapi/indices/gettemplate/get_template.go @@ -16,10 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get index templates. -// Retrieves information about one or more index templates. +// Get information about one or more index templates. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. package gettemplate import ( @@ -76,7 +80,11 @@ func NewGetTemplateFunc(tp elastictransport.Interface) NewGetTemplate { } // Get index templates. -// Retrieves information about one or more index templates. +// Get information about one or more index templates. 
+// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-template-v1.html func New(tp elastictransport.Interface) *GetTemplate { diff --git a/typedapi/indices/gettemplate/response.go b/typedapi/indices/gettemplate/response.go index 7d746fca9c..786a1671c9 100644 --- a/typedapi/indices/gettemplate/response.go +++ b/typedapi/indices/gettemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package gettemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get_template/IndicesGetTemplateResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_template/IndicesGetTemplateResponse.ts#L23-L26 type Response map[string]types.TemplateMapping diff --git a/typedapi/indices/migratereindex/migrate_reindex.go b/typedapi/indices/migratereindex/migrate_reindex.go new file mode 100644 index 0000000000..4b88050565 --- /dev/null +++ b/typedapi/indices/migratereindex/migrate_reindex.go @@ -0,0 +1,370 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Reindex legacy backing indices. +// +// Reindex all legacy backing indices for a data stream. +// This operation occurs in a persistent task. +// The persistent task ID is returned immediately and the reindexing work is +// completed in that task. +package migratereindex + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/modeenum" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type MigrateReindex struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMigrateReindex type alias for index. 
+type NewMigrateReindex func() *MigrateReindex + +// NewMigrateReindexFunc returns a new instance of MigrateReindex with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMigrateReindexFunc(tp elastictransport.Interface) NewMigrateReindex { + return func() *MigrateReindex { + n := New(tp) + + return n + } +} + +// Reindex legacy backing indices. +// +// Reindex all legacy backing indices for a data stream. +// This operation occurs in a persistent task. +// The persistent task ID is returned immediately and the reindexing work is +// completed in that task. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html +func New(tp elastictransport.Interface) *MigrateReindex { + r := &MigrateReindex{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *MigrateReindex) Raw(raw io.Reader) *MigrateReindex { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *MigrateReindex) Request(req *Request) *MigrateReindex { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *MigrateReindex) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for MigrateReindex: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r MigrateReindex) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.migrate_reindex") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.migrate_reindex") + if reader := instrument.RecordRequestBody(ctx, "indices.migrate_reindex", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.migrate_reindex") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the MigrateReindex query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a migratereindex.Response +func (r MigrateReindex) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() 
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header set a key, value pair in the MigrateReindex headers map.
+func (r *MigrateReindex) Header(key, value string) *MigrateReindex {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *MigrateReindex) ErrorTrace(errortrace bool) *MigrateReindex {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *MigrateReindex) FilterPath(filterpaths ...string) *MigrateReindex {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human +func (r *MigrateReindex) Human(human bool) *MigrateReindex { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *MigrateReindex) Pretty(pretty bool) *MigrateReindex { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Reindex mode. Currently only 'upgrade' is supported. +// API name: mode +func (r *MigrateReindex) Mode(mode modeenum.ModeEnum) *MigrateReindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Mode = mode + return r +} + +// The source index or data stream (only data streams are currently supported). +// API name: source +func (r *MigrateReindex) Source(source types.SourceIndexVariant) *MigrateReindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source = *source.SourceIndexCaster() + + return r +} diff --git a/typedapi/indices/migratereindex/request.go b/typedapi/indices/migratereindex/request.go new file mode 100644 index 0000000000..4faa5b1ca4 --- /dev/null +++ b/typedapi/indices/migratereindex/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package migratereindex + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package migratereindex +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/migrate_reindex/MigrateReindexRequest.ts#L23-L37 +type Request = types.MigrateReindex + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewMigrateReindex() + + return r +} diff --git a/typedapi/indices/migratereindex/response.go b/typedapi/indices/migratereindex/response.go new file mode 100644 index 0000000000..687492cb32 --- /dev/null +++ b/typedapi/indices/migratereindex/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package migratereindex + +// Response holds the response body struct for the package migratereindex +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/migrate_reindex/MigrateReindexResponse.ts#L22-L24 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/indices/migratetodatastream/migrate_to_data_stream.go b/typedapi/indices/migratetodatastream/migrate_to_data_stream.go index 16681c8ea8..3bf52ece1c 100644 --- a/typedapi/indices/migratetodatastream/migrate_to_data_stream.go +++ b/typedapi/indices/migratetodatastream/migrate_to_data_stream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Convert an index alias to a data stream. // Converts an index alias to a data stream. 
@@ -102,7 +102,7 @@ func NewMigrateToDataStreamFunc(tp elastictransport.Interface) NewMigrateToDataS // The indices for the alias become hidden backing indices for the stream. // The write index for the alias becomes the write index for the stream. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-migrate-to-data-stream func New(tp elastictransport.Interface) *MigrateToDataStream { r := &MigrateToDataStream{ transport: tp, @@ -323,6 +323,24 @@ func (r *MigrateToDataStream) _name(name string) *MigrateToDataStream { return r } +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *MigrateToDataStream) MasterTimeout(duration string) *MigrateToDataStream { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *MigrateToDataStream) Timeout(duration string) *MigrateToDataStream { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/indices/migratetodatastream/response.go b/typedapi/indices/migratetodatastream/response.go index 17566640b4..b058dcf345 100644 --- a/typedapi/indices/migratetodatastream/response.go +++ b/typedapi/indices/migratetodatastream/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package migratetodatastream // Response holds the response body struct for the package migratetodatastream // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/migrate_to_data_stream/IndicesMigrateToDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/migrate_to_data_stream/IndicesMigrateToDataStreamResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/modifydatastream/modify_data_stream.go b/typedapi/indices/modifydatastream/modify_data_stream.go index 0c39aef484..f8c51bb5fc 100644 --- a/typedapi/indices/modifydatastream/modify_data_stream.go +++ b/typedapi/indices/modifydatastream/modify_data_stream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Update data streams. // Performs one or more data stream modification actions in a single atomic @@ -79,7 +79,7 @@ func NewModifyDataStreamFunc(tp elastictransport.Interface) NewModifyDataStream // Performs one or more data stream modification actions in a single atomic // operation. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-modify-data-stream func New(tp elastictransport.Interface) *ModifyDataStream { r := &ModifyDataStream{ transport: tp, @@ -87,8 +87,6 @@ func New(tp elastictransport.Interface) *ModifyDataStream { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -340,10 +338,17 @@ func (r *ModifyDataStream) Pretty(pretty bool) *ModifyDataStream { return r } -// Actions Actions to perform. +// Actions to perform. // API name: actions -func (r *ModifyDataStream) Actions(actions ...types.IndicesModifyAction) *ModifyDataStream { - r.req.Actions = actions +func (r *ModifyDataStream) Actions(actions ...types.IndicesModifyActionVariant) *ModifyDataStream { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range actions { + r.req.Actions = append(r.req.Actions, *v.IndicesModifyActionCaster()) + + } return r } diff --git a/typedapi/indices/modifydatastream/request.go b/typedapi/indices/modifydatastream/request.go index 63902f67c0..e4ce1a8c7a 100644 --- a/typedapi/indices/modifydatastream/request.go +++ b/typedapi/indices/modifydatastream/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package modifydatastream @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package modifydatastream // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/modify_data_stream/IndicesModifyDataStreamRequest.ts#L23-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/modify_data_stream/IndicesModifyDataStreamRequest.ts#L23-L45 type Request struct { // Actions Actions to perform. diff --git a/typedapi/indices/modifydatastream/response.go b/typedapi/indices/modifydatastream/response.go index 72663a283b..4dcdaaf4c9 100644 --- a/typedapi/indices/modifydatastream/response.go +++ b/typedapi/indices/modifydatastream/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package modifydatastream // Response holds the response body struct for the package modifydatastream // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/modify_data_stream/IndicesModifyDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/modify_data_stream/IndicesModifyDataStreamResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/indices/open/open.go b/typedapi/indices/open/open.go index c2b62816b4..35a0fcc3aa 100644 --- a/typedapi/indices/open/open.go +++ b/typedapi/indices/open/open.go @@ -16,10 +16,44 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Opens a closed index. +// Open a closed index. // For data streams, the API opens any closed backing indices. +// +// A closed index is blocked for read/write operations and does not allow all +// operations that opened indices allow. +// It is not possible to index documents or to search for documents in a closed +// index. +// This allows closed indices to not have to maintain internal data structures +// for indexing or searching documents, resulting in a smaller overhead on the +// cluster. +// +// When opening or closing an index, the master is responsible for restarting +// the index shards to reflect the new state of the index. +// The shards will then go through the normal recovery process. +// The data of opened or closed indices is automatically replicated by the +// cluster to ensure that enough shard copies are safely kept around at all +// times. +// +// You can open and close multiple indices. +// An error is thrown if the request explicitly refers to a missing index. +// This behavior can be turned off by using the `ignore_unavailable=true` +// parameter. +// +// By default, you must explicitly name the indices you are opening or closing. +// To open or close indices with `_all`, `*`, or other wildcard expressions, +// change the `action.destructive_requires_name` setting to `false`. +// This setting can also be changed with the cluster update settings API. 
+// +// Closed indices consume a significant amount of disk-space which can cause +// problems in managed environments. +// Closing indices can be turned off with the cluster settings API by setting +// `cluster.indices.close.enable` to `false`. +// +// Because opening or closing an index allocates its shards, the +// `wait_for_active_shards` setting on index creation applies to the `_open` and +// `_close` index actions as well. package open import ( @@ -78,9 +112,43 @@ func NewOpenFunc(tp elastictransport.Interface) NewOpen { } } -// Opens a closed index. +// Open a closed index. // For data streams, the API opens any closed backing indices. // +// A closed index is blocked for read/write operations and does not allow all +// operations that opened indices allow. +// It is not possible to index documents or to search for documents in a closed +// index. +// This allows closed indices to not have to maintain internal data structures +// for indexing or searching documents, resulting in a smaller overhead on the +// cluster. +// +// When opening or closing an index, the master is responsible for restarting +// the index shards to reflect the new state of the index. +// The shards will then go through the normal recovery process. +// The data of opened or closed indices is automatically replicated by the +// cluster to ensure that enough shard copies are safely kept around at all +// times. +// +// You can open and close multiple indices. +// An error is thrown if the request explicitly refers to a missing index. +// This behavior can be turned off by using the `ignore_unavailable=true` +// parameter. +// +// By default, you must explicitly name the indices you are opening or closing. +// To open or close indices with `_all`, `*`, or other wildcard expressions, +// change the `action.destructive_requires_name` setting to `false`. +// This setting can also be changed with the cluster update settings API. 
+// +// Closed indices consume a significant amount of disk-space which can cause +// problems in managed environments. +// Closing indices can be turned off with the cluster settings API by setting +// `cluster.indices.close.enable` to `false`. +// +// Because opening or closing an index allocates its shards, the +// `wait_for_active_shards` setting on index creation applies to the `_open` and +// `_close` index actions as well. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html func New(tp elastictransport.Interface) *Open { r := &Open{ diff --git a/typedapi/indices/open/response.go b/typedapi/indices/open/response.go index d3609b27c6..212644a781 100644 --- a/typedapi/indices/open/response.go +++ b/typedapi/indices/open/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package open // Response holds the response body struct for the package open // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/open/IndicesOpenResponse.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/open/IndicesOpenResponse.ts#L20-L25 type Response struct { Acknowledged bool `json:"acknowledged"` ShardsAcknowledged bool `json:"shards_acknowledged"` diff --git a/typedapi/indices/promotedatastream/promote_data_stream.go b/typedapi/indices/promotedatastream/promote_data_stream.go index 92c06181b8..364795c63d 100644 --- a/typedapi/indices/promotedatastream/promote_data_stream.go +++ b/typedapi/indices/promotedatastream/promote_data_stream.go @@ -16,10 +16,27 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Promotes a data stream from a replicated data stream managed by CCR to a -// regular data stream +// Promote a data stream. +// Promote a data stream from a replicated data stream managed by cross-cluster +// replication (CCR) to a regular data stream. +// +// With CCR auto following, a data stream from a remote cluster can be +// replicated to the local cluster. +// These data streams can't be rolled over in the local cluster. +// These replicated data streams roll over only if the upstream data stream +// rolls over. +// In the event that the remote cluster is no longer available, the data stream +// in the local cluster can be promoted to a regular data stream, which allows +// these data streams to be rolled over in the local cluster. +// +// NOTE: When promoting a data stream, ensure the local cluster has a data +// stream enabled index template that matches the data stream. +// If this is missing, the data stream will not be able to roll over until a +// matching index template is created. +// This will affect the lifecycle management of the data stream and interfere +// with the data stream size and retention. package promotedatastream import ( @@ -77,10 +94,27 @@ func NewPromoteDataStreamFunc(tp elastictransport.Interface) NewPromoteDataStrea } } -// Promotes a data stream from a replicated data stream managed by CCR to a -// regular data stream +// Promote a data stream. +// Promote a data stream from a replicated data stream managed by cross-cluster +// replication (CCR) to a regular data stream. +// +// With CCR auto following, a data stream from a remote cluster can be +// replicated to the local cluster. +// These data streams can't be rolled over in the local cluster. 
+// These replicated data streams roll over only if the upstream data stream +// rolls over. +// In the event that the remote cluster is no longer available, the data stream +// in the local cluster can be promoted to a regular data stream, which allows +// these data streams to be rolled over in the local cluster. +// +// NOTE: When promoting a data stream, ensure the local cluster has a data +// stream enabled index template that matches the data stream. +// If this is missing, the data stream will not be able to roll over until a +// matching index template is created. +// This will affect the lifecycle management of the data stream and interfere +// with the data stream size and retention. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html +// https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-promote-data-stream func New(tp elastictransport.Interface) *PromoteDataStream { r := &PromoteDataStream{ transport: tp, @@ -301,6 +335,15 @@ func (r *PromoteDataStream) _name(name string) *PromoteDataStream { return r } +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *PromoteDataStream) MasterTimeout(duration string) *PromoteDataStream { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/indices/promotedatastream/response.go b/typedapi/indices/promotedatastream/response.go index f7e26e1d0b..c381032621 100644 --- a/typedapi/indices/promotedatastream/response.go +++ b/typedapi/indices/promotedatastream/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package promotedatastream @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package promotedatastream // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/promote_data_stream/IndicesPromoteDataStreamResponse.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/promote_data_stream/IndicesPromoteDataStreamResponse.ts#L22-L25 type Response = json.RawMessage diff --git a/typedapi/indices/putalias/put_alias.go b/typedapi/indices/putalias/put_alias.go index 888ea7499f..11d4c55409 100644 --- a/typedapi/indices/putalias/put_alias.go +++ b/typedapi/indices/putalias/put_alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Create or update an alias. // Adds a data stream or index to an alias. @@ -90,7 +90,7 @@ func NewPutAliasFunc(tp elastictransport.Interface) NewPutAlias { // Create or update an alias. // Adds a data stream or index to an alias. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html +// https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-alias func New(tp elastictransport.Interface) *PutAlias { r := &PutAlias{ transport: tp, @@ -98,8 +98,6 @@ func New(tp elastictransport.Interface) *PutAlias { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -420,26 +418,35 @@ func (r *PutAlias) Pretty(pretty bool) *PutAlias { return r } -// Filter Query used to limit documents the alias can access. +// Query used to limit documents the alias can access. // API name: filter -func (r *PutAlias) Filter(filter *types.Query) *PutAlias { +func (r *PutAlias) Filter(filter types.QueryVariant) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Filter = filter + r.req.Filter = filter.QueryCaster() return r } -// IndexRouting Value used to route indexing operations to a specific shard. +// Value used to route indexing operations to a specific shard. // If specified, this overwrites the `routing` value for indexing operations. // Data stream aliases don’t support this parameter. // API name: index_routing func (r *PutAlias) IndexRouting(routing string) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexRouting = &routing return r } -// IsWriteIndex If `true`, sets the write index or data stream for the alias. +// If `true`, sets the write index or data stream for the alias. // If an alias points to multiple indices or data streams and `is_write_index` // isn’t set, the alias rejects write requests. 
// If an index alias points to one index and `is_write_index` isn’t set, the @@ -448,25 +455,40 @@ func (r *PutAlias) IndexRouting(routing string) *PutAlias { // alias points to one data stream. // API name: is_write_index func (r *PutAlias) IsWriteIndex(iswriteindex bool) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IsWriteIndex = &iswriteindex return r } -// Routing Value used to route indexing and search operations to a specific shard. +// Value used to route indexing and search operations to a specific shard. // Data stream aliases don’t support this parameter. // API name: routing func (r *PutAlias) Routing(routing string) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Routing = &routing return r } -// SearchRouting Value used to route search operations to a specific shard. +// Value used to route search operations to a specific shard. // If specified, this overwrites the `routing` value for search operations. // Data stream aliases don’t support this parameter. // API name: search_routing func (r *PutAlias) SearchRouting(routing string) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.SearchRouting = &routing return r diff --git a/typedapi/indices/putalias/request.go b/typedapi/indices/putalias/request.go index c3bc4b9cfe..d4c06a223e 100644 --- a/typedapi/indices/putalias/request.go +++ b/typedapi/indices/putalias/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putalias @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putalias // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/put_alias/IndicesPutAliasRequest.ts#L25-L92 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/put_alias/IndicesPutAliasRequest.ts#L25-L103 type Request struct { // Filter Query used to limit documents the alias can access. diff --git a/typedapi/indices/putalias/response.go b/typedapi/indices/putalias/response.go index 2527661e49..5572efef36 100644 --- a/typedapi/indices/putalias/response.go +++ b/typedapi/indices/putalias/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putalias // Response holds the response body struct for the package putalias // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/put_alias/IndicesPutAliasResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/put_alias/IndicesPutAliasResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/indices/putdatalifecycle/put_data_lifecycle.go b/typedapi/indices/putdatalifecycle/put_data_lifecycle.go index 9ecc2064ac..e4e6c64104 100644 --- a/typedapi/indices/putdatalifecycle/put_data_lifecycle.go +++ b/typedapi/indices/putdatalifecycle/put_data_lifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Update data stream lifecycles. // Update the data stream lifecycle of the specified data streams. @@ -86,7 +86,7 @@ func NewPutDataLifecycleFunc(tp elastictransport.Interface) NewPutDataLifecycle // Update data stream lifecycles. // Update the data stream lifecycle of the specified data streams. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-put-lifecycle.html +// https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-data-lifecycle func New(tp elastictransport.Interface) *PutDataLifecycle { r := &PutDataLifecycle{ transport: tp, @@ -94,8 +94,6 @@ func New(tp elastictransport.Interface) *PutDataLifecycle { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -189,6 +187,12 @@ func (r *PutDataLifecycle) HttpRequest(ctx context.Context) (*http.Request, erro req.Header = r.headers.Clone() + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + if req.Header.Get("Accept") == "" { req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") } @@ -392,24 +396,47 @@ func (r *PutDataLifecycle) Pretty(pretty bool) *PutDataLifecycle { return r } -// DataRetention If 
defined, every document added to this data stream will be stored at least +// If defined, every document added to this data stream will be stored at least // for this time frame. // Any time after this duration the document could be deleted. // When empty, every document in this data stream will be stored indefinitely. // API name: data_retention -func (r *PutDataLifecycle) DataRetention(duration types.Duration) *PutDataLifecycle { - r.req.DataRetention = duration +func (r *PutDataLifecycle) DataRetention(duration types.DurationVariant) *PutDataLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DataRetention = *duration.DurationCaster() return r } -// Downsampling If defined, every backing index will execute the configured downsampling -// configuration after the backing -// index is not the data stream write index anymore. +// The downsampling configuration to execute for the managed backing index after +// rollover. // API name: downsampling -func (r *PutDataLifecycle) Downsampling(downsampling *types.DataStreamLifecycleDownsampling) *PutDataLifecycle { +func (r *PutDataLifecycle) Downsampling(downsampling types.DataStreamLifecycleDownsamplingVariant) *PutDataLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Downsampling = downsampling.DataStreamLifecycleDownsamplingCaster() + + return r +} + +// If defined, it turns data stream lifecycle on/off (`true`/`false`) for this +// data stream. A data stream lifecycle +// that's disabled (enabled: `false`) will have no effect on the data stream. 
+// API name: enabled +func (r *PutDataLifecycle) Enabled(enabled bool) *PutDataLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Downsampling = downsampling + r.req.Enabled = &enabled return r } diff --git a/typedapi/indices/putdatalifecycle/request.go b/typedapi/indices/putdatalifecycle/request.go index 27e0d3fd28..7064a7b042 100644 --- a/typedapi/indices/putdatalifecycle/request.go +++ b/typedapi/indices/putdatalifecycle/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putdatalifecycle @@ -26,13 +26,14 @@ import ( "errors" "fmt" "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Request holds the request body struct for the package putdatalifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/put_data_lifecycle/IndicesPutDataLifecycleRequest.ts#L25-L76 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/put_data_lifecycle/IndicesPutDataLifecycleRequest.ts#L25-L93 type Request struct { // DataRetention If defined, every document added to this data stream will be stored at least @@ -40,10 +41,13 @@ type Request struct { // Any time after this duration the document could be deleted. // When empty, every document in this data stream will be stored indefinitely. DataRetention types.Duration `json:"data_retention,omitempty"` - // Downsampling If defined, every backing index will execute the configured downsampling - // configuration after the backing - // index is not the data stream write index anymore. 
+ // Downsampling The downsampling configuration to execute for the managed backing index after + // rollover. Downsampling *types.DataStreamLifecycleDownsampling `json:"downsampling,omitempty"` + // Enabled If defined, it turns data stream lifecycle on/off (`true`/`false`) for this + // data stream. A data stream lifecycle + // that's disabled (enabled: `false`) will have no effect on the data stream. + Enabled *bool `json:"enabled,omitempty"` } // NewRequest returns a Request @@ -89,6 +93,20 @@ func (s *Request) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Downsampling", err) } + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + } } return nil diff --git a/typedapi/indices/putdatalifecycle/response.go b/typedapi/indices/putdatalifecycle/response.go index aa065ec32e..2bae6a0e31 100644 --- a/typedapi/indices/putdatalifecycle/response.go +++ b/typedapi/indices/putdatalifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putdatalifecycle // Response holds the response body struct for the package putdatalifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/put_data_lifecycle/IndicesPutDataLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/put_data_lifecycle/IndicesPutDataLifecycleResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/putindextemplate/put_index_template.go b/typedapi/indices/putindextemplate/put_index_template.go index 4d01968bed..991386c86c 100644 --- a/typedapi/indices/putindextemplate/put_index_template.go +++ b/typedapi/indices/putindextemplate/put_index_template.go @@ -16,11 +16,54 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Create or update an index template. // Index templates define settings, mappings, and aliases that can be applied // automatically to new indices. +// +// Elasticsearch applies templates to new indices based on an wildcard pattern +// that matches the index name. +// Index templates are applied during data stream or index creation. +// For data streams, these settings and mappings are applied when the stream's +// backing indices are created. +// Settings and mappings specified in a create index API request override any +// settings or mappings specified in an index template. 
+// Changes to index templates do not affect existing indices, including the +// existing backing indices of a data stream. +// +// You can use C-style `/* *\/` block comments in index templates. +// You can include comments anywhere in the request body, except before the +// opening curly bracket. +// +// **Multiple matching templates** +// +// If multiple index templates match the name of a new index or data stream, the +// template with the highest priority is used. +// +// Multiple templates with overlapping index patterns at the same priority are +// not allowed and an error will be thrown when attempting to create a template +// matching an existing index template at identical priorities. +// +// **Composing aliases, mappings, and settings** +// +// When multiple component templates are specified in the `composed_of` field +// for an index template, they are merged in the order specified, meaning that +// later component templates override earlier component templates. +// Any mappings, settings, or aliases from the parent index template are merged +// in next. +// Finally, any configuration on the index request itself is merged. +// Mapping definitions are merged recursively, which means that later mapping +// components can introduce new field mappings and update the mapping +// configuration. +// If a field mapping is already contained in an earlier component, its +// definition will be completely overwritten by the later one. +// This recursive merging strategy applies not only to field mappings, but also +// root options like `dynamic_templates` and `meta`. +// If an earlier component contains a `dynamic_templates` block, then by default +// new `dynamic_templates` entries are appended onto the end. +// If an entry already exists with the same key, then it is overwritten by the +// new definition. 
package putindextemplate import ( @@ -87,7 +130,50 @@ func NewPutIndexTemplateFunc(tp elastictransport.Interface) NewPutIndexTemplate // Index templates define settings, mappings, and aliases that can be applied // automatically to new indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-template.html +// Elasticsearch applies templates to new indices based on an wildcard pattern +// that matches the index name. +// Index templates are applied during data stream or index creation. +// For data streams, these settings and mappings are applied when the stream's +// backing indices are created. +// Settings and mappings specified in a create index API request override any +// settings or mappings specified in an index template. +// Changes to index templates do not affect existing indices, including the +// existing backing indices of a data stream. +// +// You can use C-style `/* *\/` block comments in index templates. +// You can include comments anywhere in the request body, except before the +// opening curly bracket. +// +// **Multiple matching templates** +// +// If multiple index templates match the name of a new index or data stream, the +// template with the highest priority is used. +// +// Multiple templates with overlapping index patterns at the same priority are +// not allowed and an error will be thrown when attempting to create a template +// matching an existing index template at identical priorities. +// +// **Composing aliases, mappings, and settings** +// +// When multiple component templates are specified in the `composed_of` field +// for an index template, they are merged in the order specified, meaning that +// later component templates override earlier component templates. +// Any mappings, settings, or aliases from the parent index template are merged +// in next. +// Finally, any configuration on the index request itself is merged. 
+// Mapping definitions are merged recursively, which means that later mapping +// components can introduce new field mappings and update the mapping +// configuration. +// If a field mapping is already contained in an earlier component, its +// definition will be completely overwritten by the later one. +// This recursive merging strategy applies not only to field mappings, but also +// root options like `dynamic_templates` and `meta`. +// If an earlier component contains a `dynamic_templates` block, then by default +// new `dynamic_templates` entries are appended onto the end. +// If an entry already exists with the same key, then it is overwritten by the +// new definition. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-index-template func New(tp elastictransport.Interface) *PutIndexTemplate { r := &PutIndexTemplate{ transport: tp, @@ -95,8 +181,6 @@ func New(tp elastictransport.Interface) *PutIndexTemplate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -387,7 +471,7 @@ func (r *PutIndexTemplate) Pretty(pretty bool) *PutIndexTemplate { return r } -// AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster +// This setting overrides the value of the `action.auto_create_index` cluster // setting. // If set to `true` in a template, then indices can be automatically created // using that template even if auto-creation of indices is disabled via @@ -396,73 +480,114 @@ func (r *PutIndexTemplate) Pretty(pretty bool) *PutIndexTemplate { // always be explicitly created, and may never be automatically created. 
// API name: allow_auto_create func (r *PutIndexTemplate) AllowAutoCreate(allowautocreate bool) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowAutoCreate = &allowautocreate return r } -// ComposedOf An ordered list of component template names. +// An ordered list of component template names. // Component templates are merged in the order specified, meaning that the last // component template specified has the highest precedence. // API name: composed_of func (r *PutIndexTemplate) ComposedOf(composedofs ...string) *PutIndexTemplate { - r.req.ComposedOf = composedofs + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range composedofs { + + r.req.ComposedOf = append(r.req.ComposedOf, v) + } return r } -// DataStream If this object is included, the template is used to create data streams and +// If this object is included, the template is used to create data streams and // their backing indices. // Supports an empty object. // Data streams require a matching index template with a `data_stream` object. // API name: data_stream -func (r *PutIndexTemplate) DataStream(datastream *types.DataStreamVisibility) *PutIndexTemplate { +func (r *PutIndexTemplate) DataStream(datastream types.DataStreamVisibilityVariant) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DataStream = datastream + r.req.DataStream = datastream.DataStreamVisibilityCaster() return r } -// Deprecated Marks this index template as deprecated. When creating or updating a +// Marks this index template as deprecated. When creating or updating a // non-deprecated index template // that uses deprecated components, Elasticsearch will emit a deprecation // warning. 
// API name: deprecated func (r *PutIndexTemplate) Deprecated(deprecated bool) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Deprecated = &deprecated return r } -// IgnoreMissingComponentTemplates The configuration option ignore_missing_component_templates can be used when +// The configuration option ignore_missing_component_templates can be used when // an index template // references a component template that might not exist // API name: ignore_missing_component_templates func (r *PutIndexTemplate) IgnoreMissingComponentTemplates(ignoremissingcomponenttemplates ...string) *PutIndexTemplate { - r.req.IgnoreMissingComponentTemplates = ignoremissingcomponenttemplates + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ignoremissingcomponenttemplates { + + r.req.IgnoreMissingComponentTemplates = append(r.req.IgnoreMissingComponentTemplates, v) + } return r } -// IndexPatterns Name of the index template to create. +// Name of the index template to create. // API name: index_patterns func (r *PutIndexTemplate) IndexPatterns(indices ...string) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexPatterns = indices return r } -// Meta_ Optional user metadata about the index template. -// May have any contents. -// This map is not automatically generated by Elasticsearch. +// Optional user metadata about the index template. +// It may have any contents. +// It is not automatically generated or used by Elasticsearch. +// This user-defined object is stored in the cluster state, so keeping it short +// is preferable +// To unset the metadata, replace the template without specifying it. 
// API name: _meta -func (r *PutIndexTemplate) Meta_(metadata types.Metadata) *PutIndexTemplate { - r.req.Meta_ = metadata +func (r *PutIndexTemplate) Meta_(metadata types.MetadataVariant) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// Priority Priority to determine index template precedence when a new data stream or +// Priority to determine index template precedence when a new data stream or // index is created. // The index template with the highest priority is chosen. // If no priority is specified the template is treated as though it is of @@ -470,27 +595,43 @@ func (r *PutIndexTemplate) Meta_(metadata types.Metadata) *PutIndexTemplate { // This number is not automatically generated by Elasticsearch. // API name: priority func (r *PutIndexTemplate) Priority(priority int64) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Priority = &priority return r } -// Template Template to be applied. +// Template to be applied. // It may optionally include an `aliases`, `mappings`, or `settings` // configuration. // API name: template -func (r *PutIndexTemplate) Template(template *types.IndexTemplateMapping) *PutIndexTemplate { +func (r *PutIndexTemplate) Template(template types.IndexTemplateMappingVariant) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Template = template + r.req.Template = template.IndexTemplateMappingCaster() return r } -// Version Version number used to manage index templates externally. +// Version number used to manage index templates externally. // This number is not automatically generated by Elasticsearch. +// External systems can use these version numbers to simplify template +// management. 
+// To unset a version, replace the template without specifying one. // API name: version func (r *PutIndexTemplate) Version(versionnumber int64) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &versionnumber return r diff --git a/typedapi/indices/putindextemplate/request.go b/typedapi/indices/putindextemplate/request.go index 05cc171395..20574b5aed 100644 --- a/typedapi/indices/putindextemplate/request.go +++ b/typedapi/indices/putindextemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putindextemplate @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L37-L119 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L37-L157 type Request struct { // AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster @@ -65,8 +65,11 @@ type Request struct { // IndexPatterns Name of the index template to create. IndexPatterns []string `json:"index_patterns,omitempty"` // Meta_ Optional user metadata about the index template. - // May have any contents. - // This map is not automatically generated by Elasticsearch. + // It may have any contents. + // It is not automatically generated or used by Elasticsearch. 
+ // This user-defined object is stored in the cluster state, so keeping it short + // is preferable + // To unset the metadata, replace the template without specifying it. Meta_ types.Metadata `json:"_meta,omitempty"` // Priority Priority to determine index template precedence when a new data stream or // index is created. @@ -81,6 +84,9 @@ type Request struct { Template *types.IndexTemplateMapping `json:"template,omitempty"` // Version Version number used to manage index templates externally. // This number is not automatically generated by Elasticsearch. + // External systems can use these version numbers to simplify template + // management. + // To unset a version, replace the template without specifying one. Version *int64 `json:"version,omitempty"` } diff --git a/typedapi/indices/putindextemplate/response.go b/typedapi/indices/putindextemplate/response.go index 698dab2bcc..05ca0b0693 100644 --- a/typedapi/indices/putindextemplate/response.go +++ b/typedapi/indices/putindextemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putindextemplate // Response holds the response body struct for the package putindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/put_index_template/IndicesPutIndexTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/put_index_template/IndicesPutIndexTemplateResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/indices/putmapping/put_mapping.go b/typedapi/indices/putmapping/put_mapping.go index d4a8332388..d2bf9906b9 100644 --- a/typedapi/indices/putmapping/put_mapping.go +++ b/typedapi/indices/putmapping/put_mapping.go @@ -16,13 +16,47 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Update field mappings. -// Adds new fields to an existing data stream or index. -// You can also use this API to change the search settings of existing fields. +// Add new fields to an existing data stream or index. +// You can also use this API to change the search settings of existing fields +// and add new properties to existing object fields. // For data streams, these changes are applied to all backing indices by // default. +// +// **Add multi-fields to an existing field** +// +// Multi-fields let you index the same field in different ways. +// You can use this API to update the fields mapping parameter and enable +// multi-fields for an existing field. +// WARNING: If an index (or data stream) contains documents when you add a +// multi-field, those documents will not have values for the new multi-field. +// You can populate the new multi-field with the update by query API. +// +// **Change supported mapping parameters for an existing field** +// +// The documentation for each mapping parameter indicates whether you can update +// it for an existing field using this API. +// For example, you can use the update mapping API to update the `ignore_above` +// parameter. +// +// **Change the mapping of an existing field** +// +// Except for supported mapping parameters, you can't change the mapping or +// field type of an existing field. 
+// Changing an existing field could invalidate data that's already indexed. +// +// If you need to change the mapping of a field in a data stream's backing +// indices, refer to documentation about modifying data streams. +// If you need to change the mapping of a field in other indices, create a new +// index with the correct mapping and reindex your data into that index. +// +// **Rename a field** +// +// Renaming a field would invalidate data already indexed under the old field +// name. +// Instead, add an alias field to create an alternate field name. package putmapping import ( @@ -88,11 +122,45 @@ func NewPutMappingFunc(tp elastictransport.Interface) NewPutMapping { } // Update field mappings. -// Adds new fields to an existing data stream or index. -// You can also use this API to change the search settings of existing fields. +// Add new fields to an existing data stream or index. +// You can also use this API to change the search settings of existing fields +// and add new properties to existing object fields. // For data streams, these changes are applied to all backing indices by // default. // +// **Add multi-fields to an existing field** +// +// Multi-fields let you index the same field in different ways. +// You can use this API to update the fields mapping parameter and enable +// multi-fields for an existing field. +// WARNING: If an index (or data stream) contains documents when you add a +// multi-field, those documents will not have values for the new multi-field. +// You can populate the new multi-field with the update by query API. +// +// **Change supported mapping parameters for an existing field** +// +// The documentation for each mapping parameter indicates whether you can update +// it for an existing field using this API. +// For example, you can use the update mapping API to update the `ignore_above` +// parameter. 
+// +// **Change the mapping of an existing field** +// +// Except for supported mapping parameters, you can't change the mapping or +// field type of an existing field. +// Changing an existing field could invalidate data that's already indexed. +// +// If you need to change the mapping of a field in a data stream's backing +// indices, refer to documentation about modifying data streams. +// If you need to change the mapping of a field in other indices, create a new +// index with the correct mapping and reindex your data into that index. +// +// **Rename a field** +// +// Renaming a field would invalidate data already indexed under the old field +// name. +// Instead, add an alias field to create an alternate field name. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html func New(tp elastictransport.Interface) *PutMapping { r := &PutMapping{ @@ -101,8 +169,6 @@ func New(tp elastictransport.Interface) *PutMapping { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -432,102 +498,170 @@ func (r *PutMapping) Pretty(pretty bool) *PutMapping { return r } -// DateDetection Controls whether dynamic date detection is enabled. +// Controls whether dynamic date detection is enabled. // API name: date_detection func (r *PutMapping) DateDetection(datedetection bool) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DateDetection = &datedetection return r } -// Dynamic Controls whether new fields are added dynamically. +// Controls whether new fields are added dynamically. 
// API name: dynamic func (r *PutMapping) Dynamic(dynamic dynamicmapping.DynamicMapping) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Dynamic = &dynamic - return r } -// DynamicDateFormats If date detection is enabled then new string fields are checked +// If date detection is enabled then new string fields are checked // against 'dynamic_date_formats' and if the value matches then // a new date field is added instead of string. // API name: dynamic_date_formats func (r *PutMapping) DynamicDateFormats(dynamicdateformats ...string) *PutMapping { - r.req.DynamicDateFormats = dynamicdateformats + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range dynamicdateformats { + r.req.DynamicDateFormats = append(r.req.DynamicDateFormats, v) + + } return r } -// DynamicTemplates Specify dynamic templates for the mapping. +// Specify dynamic templates for the mapping. // API name: dynamic_templates func (r *PutMapping) DynamicTemplates(dynamictemplates []map[string]types.DynamicTemplate) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DynamicTemplates = dynamictemplates return r } -// FieldNames_ Control whether field names are enabled for the index. +// Control whether field names are enabled for the index. // API name: _field_names -func (r *PutMapping) FieldNames_(fieldnames_ *types.FieldNamesField) *PutMapping { +func (r *PutMapping) FieldNames_(fieldnames_ types.FieldNamesFieldVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.FieldNames_ = fieldnames_ + r.req.FieldNames_ = fieldnames_.FieldNamesFieldCaster() return r } -// Meta_ A mapping type can have custom meta data associated with it. 
These are +// A mapping type can have custom meta data associated with it. These are // not used at all by Elasticsearch, but can be used to store // application-specific metadata. // API name: _meta -func (r *PutMapping) Meta_(metadata types.Metadata) *PutMapping { - r.req.Meta_ = metadata +func (r *PutMapping) Meta_(metadata types.MetadataVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// NumericDetection Automatically map strings into numeric data types for all fields. +// Automatically map strings into numeric data types for all fields. // API name: numeric_detection func (r *PutMapping) NumericDetection(numericdetection bool) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.NumericDetection = &numericdetection return r } -// Properties Mapping for a field. For new fields, this mapping can include: +// Mapping for a field. For new fields, this mapping can include: // // - Field name // - Field data type // - Mapping parameters // API name: properties func (r *PutMapping) Properties(properties map[string]types.Property) *PutMapping { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Properties = properties + return r +} + +func (r *PutMapping) AddProperty(key string, value types.PropertyVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Property + if r.req.Properties == nil { + r.req.Properties = make(map[string]types.Property) + } else { + tmp = r.req.Properties + } + tmp[key] = *value.PropertyCaster() + + r.req.Properties = tmp return r } -// Routing_ Enable making a routing value required on indexed documents. 
+// Enable making a routing value required on indexed documents. // API name: _routing -func (r *PutMapping) Routing_(routing_ *types.RoutingField) *PutMapping { +func (r *PutMapping) Routing_(routing_ types.RoutingFieldVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Routing_ = routing_ + r.req.Routing_ = routing_.RoutingFieldCaster() return r } -// Runtime Mapping of runtime fields for the index. +// Mapping of runtime fields for the index. // API name: runtime -func (r *PutMapping) Runtime(runtimefields types.RuntimeFields) *PutMapping { - r.req.Runtime = runtimefields +func (r *PutMapping) Runtime(runtimefields types.RuntimeFieldsVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Runtime = *runtimefields.RuntimeFieldsCaster() return r } -// Source_ Control whether the _source field is enabled on the index. +// Control whether the _source field is enabled on the index. // API name: _source -func (r *PutMapping) Source_(source_ *types.SourceField) *PutMapping { +func (r *PutMapping) Source_(source_ types.SourceFieldVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source_ = source_ + r.req.Source_ = source_.SourceFieldCaster() return r } diff --git a/typedapi/indices/putmapping/request.go b/typedapi/indices/putmapping/request.go index b17ff3bdcb..f46a8e0a4c 100644 --- a/typedapi/indices/putmapping/request.go +++ b/typedapi/indices/putmapping/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putmapping @@ -34,7 +34,7 @@ import ( // Request holds the request body struct for the package putmapping // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/put_mapping/IndicesPutMappingRequest.ts#L42-L150 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/put_mapping/IndicesPutMappingRequest.ts#L41-L181 type Request struct { // DateDetection Controls whether dynamic date detection is enabled. @@ -129,24 +129,8 @@ func (s *Request) UnmarshalJSON(data []byte) error { } case "dynamic_templates": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]types.DynamicTemplate, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "DynamicTemplates", err) - } - s.DynamicTemplates = append(s.DynamicTemplates, o) - case '[': - o := make([]map[string]types.DynamicTemplate, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "DynamicTemplates", err) - } - s.DynamicTemplates = o + if err := dec.Decode(&s.DynamicTemplates); err != nil { + return fmt.Errorf("%s | %w", "DynamicTemplates", err) } case "_field_names": @@ -192,301 +176,313 @@ func (s *Request) UnmarshalJSON(data []byte) error { case "binary": oo := types.NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := types.NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "{dynamic_type}": oo := types.NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := types.NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := types.NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := types.NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := types.NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := types.NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := types.NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := types.NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := types.NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := types.NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := types.NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"date_nanos": oo := types.NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := types.NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := types.NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := types.NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := types.NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := types.NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := types.NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := types.NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := types.NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := types.NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := types.NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "constant_keyword": oo := types.NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := types.NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := types.NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := types.NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := types.NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := types.NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := types.NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := types.NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := types.NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := types.NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := types.NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"byte": oo := types.NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := types.NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := types.NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := types.NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := types.NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := types.NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := types.NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := types.NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := types.NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := types.NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := types.NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = 
oo case "float_range": oo := types.NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := types.NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := types.NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := types.NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := types.NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(types.Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(types.Property) | %w", err) } s.Properties[key] = oo } diff --git a/typedapi/indices/putmapping/response.go b/typedapi/indices/putmapping/response.go index ed1de3ee58..cc4231c9f8 100644 --- a/typedapi/indices/putmapping/response.go +++ b/typedapi/indices/putmapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putmapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putmapping // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/put_mapping/IndicesPutMappingResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/put_mapping/IndicesPutMappingResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/putsettings/put_settings.go b/typedapi/indices/putsettings/put_settings.go index e92f23f151..0e4633aa2b 100644 --- a/typedapi/indices/putsettings/put_settings.go +++ b/typedapi/indices/putsettings/put_settings.go @@ -16,11 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Update index settings. -// Changes dynamic index settings in real time. For data streams, index setting -// changes are applied to all backing indices by default. +// Changes dynamic index settings in real time. +// For data streams, index setting changes are applied to all backing indices by +// default. +// +// To revert a setting to the default value, use a null value. +// The list of per-index settings that can be updated dynamically on live +// indices can be found in index module documentation. +// To preserve existing settings from being updated, set the `preserve_existing` +// parameter to `true`. 
+// +// NOTE: You can only define new analyzers on closed indices. +// To add an analyzer, you must close the index, define the analyzer, and reopen +// the index. +// You cannot close the write index of a data stream. +// To update the analyzer for a data stream's write index and future backing +// indices, update the analyzer in the index template used by the stream. +// Then roll over the data stream to apply the new analyzer to the stream's +// write index and future backing indices. +// This affects searches and any new data added to the stream after the +// rollover. +// However, it does not affect the data stream's backing indices or their +// existing data. +// To change the analyzer for existing backing indices, you must create a new +// data stream and reindex your data into it. package putsettings import ( @@ -84,8 +106,30 @@ func NewPutSettingsFunc(tp elastictransport.Interface) NewPutSettings { } // Update index settings. -// Changes dynamic index settings in real time. For data streams, index setting -// changes are applied to all backing indices by default. +// Changes dynamic index settings in real time. +// For data streams, index setting changes are applied to all backing indices by +// default. +// +// To revert a setting to the default value, use a null value. +// The list of per-index settings that can be updated dynamically on live +// indices can be found in index module documentation. +// To preserve existing settings from being updated, set the `preserve_existing` +// parameter to `true`. +// +// NOTE: You can only define new analyzers on closed indices. +// To add an analyzer, you must close the index, define the analyzer, and reopen +// the index. +// You cannot close the write index of a data stream. +// To update the analyzer for a data stream's write index and future backing +// indices, update the analyzer in the index template used by the stream. 
+// Then roll over the data stream to apply the new analyzer to the stream's +// write index and future backing indices. +// This affects searches and any new data added to the stream after the +// rollover. +// However, it does not affect the data stream's backing indices or their +// existing data. +// To change the analyzer for existing backing indices, you must create a new +// data stream and reindex your data into it. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html func New(tp elastictransport.Interface) *PutSettings { @@ -95,8 +139,6 @@ func New(tp elastictransport.Interface) *PutSettings { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -441,48 +483,71 @@ func (r *PutSettings) Pretty(pretty bool) *PutSettings { } // API name: analysis -func (r *PutSettings) Analysis(analysis *types.IndexSettingsAnalysis) *PutSettings { +func (r *PutSettings) Analysis(analysis types.IndexSettingsAnalysisVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Analysis = analysis + r.req.Analysis = analysis.IndexSettingsAnalysisCaster() return r } -// Analyze Settings to define analyzers, tokenizers, token filters and character +// Settings to define analyzers, tokenizers, token filters and character // filters. 
// API name: analyze -func (r *PutSettings) Analyze(analyze *types.SettingsAnalyze) *PutSettings { +func (r *PutSettings) Analyze(analyze types.SettingsAnalyzeVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Analyze = analyze + r.req.Analyze = analyze.SettingsAnalyzeCaster() return r } // API name: auto_expand_replicas -func (r *PutSettings) AutoExpandReplicas(autoexpandreplicas string) *PutSettings { +func (r *PutSettings) AutoExpandReplicas(autoexpandreplicas any) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AutoExpandReplicas = &autoexpandreplicas + r.req.AutoExpandReplicas = autoexpandreplicas return r } // API name: blocks -func (r *PutSettings) Blocks(blocks *types.IndexSettingBlocks) *PutSettings { +func (r *PutSettings) Blocks(blocks types.IndexSettingBlocksVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Blocks = blocks + r.req.Blocks = blocks.IndexSettingBlocksCaster() return r } // API name: check_on_startup func (r *PutSettings) CheckOnStartup(checkonstartup indexcheckonstartup.IndexCheckOnStartup) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.CheckOnStartup = &checkonstartup - return r } // API name: codec func (r *PutSettings) Codec(codec string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Codec = &codec @@ -490,21 +555,36 @@ func (r *PutSettings) Codec(codec string) *PutSettings { } // API name: creation_date -func (r *PutSettings) CreationDate(stringifiedepochtimeunitmillis types.StringifiedEpochTimeUnitMillis) *PutSettings { - r.req.CreationDate = stringifiedepochtimeunitmillis +func (r *PutSettings) 
CreationDate(stringifiedepochtimeunitmillis types.StringifiedEpochTimeUnitMillisVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.CreationDate = *stringifiedepochtimeunitmillis.StringifiedEpochTimeUnitMillisCaster() return r } // API name: creation_date_string -func (r *PutSettings) CreationDateString(datetime types.DateTime) *PutSettings { - r.req.CreationDateString = datetime +func (r *PutSettings) CreationDateString(datetime types.DateTimeVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.CreationDateString = *datetime.DateTimeCaster() return r } // API name: default_pipeline func (r *PutSettings) DefaultPipeline(pipelinename string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DefaultPipeline = &pipelinename return r @@ -512,6 +592,11 @@ func (r *PutSettings) DefaultPipeline(pipelinename string) *PutSettings { // API name: final_pipeline func (r *PutSettings) FinalPipeline(pipelinename string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FinalPipeline = &pipelinename return r @@ -519,92 +604,162 @@ func (r *PutSettings) FinalPipeline(pipelinename string) *PutSettings { // API name: format func (r *PutSettings) Format(format string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Format = format return r } // API name: gc_deletes -func (r *PutSettings) GcDeletes(duration types.Duration) *PutSettings { - r.req.GcDeletes = duration +func (r *PutSettings) GcDeletes(duration types.DurationVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + 
r.req.GcDeletes = *duration.DurationCaster() return r } // API name: hidden func (r *PutSettings) Hidden(hidden string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Hidden = hidden return r } // API name: highlight -func (r *PutSettings) Highlight(highlight *types.SettingsHighlight) *PutSettings { +func (r *PutSettings) Highlight(highlight types.SettingsHighlightVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Highlight = highlight + r.req.Highlight = highlight.SettingsHighlightCaster() return r } // API name: index -func (r *PutSettings) Index(index *types.IndexSettings) *PutSettings { +func (r *PutSettings) Index(index types.IndexSettingsVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Index = index + r.req.Index = index.IndexSettingsCaster() return r } // API name: IndexSettings func (r *PutSettings) IndexSettings(indexsettings map[string]json.RawMessage) *PutSettings { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.IndexSettings = indexsettings + return r +} + +func (r *PutSettings) AddIndexSetting(key string, value json.RawMessage) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.IndexSettings == nil { + r.req.IndexSettings = make(map[string]json.RawMessage) + } else { + tmp = r.req.IndexSettings + } + tmp[key] = value + + r.req.IndexSettings = tmp return r } -// IndexingPressure Configure indexing back pressure limits. +// Configure indexing back pressure limits. 
// API name: indexing_pressure -func (r *PutSettings) IndexingPressure(indexingpressure *types.IndicesIndexingPressure) *PutSettings { +func (r *PutSettings) IndexingPressure(indexingpressure types.IndicesIndexingPressureVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndexingPressure = indexingpressure + r.req.IndexingPressure = indexingpressure.IndicesIndexingPressureCaster() return r } // API name: indexing.slowlog -func (r *PutSettings) IndexingSlowlog(indexingslowlog *types.IndexingSlowlogSettings) *PutSettings { +func (r *PutSettings) IndexingSlowlog(indexingslowlog types.IndexingSlowlogSettingsVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndexingSlowlog = indexingslowlog + r.req.IndexingSlowlog = indexingslowlog.IndexingSlowlogSettingsCaster() return r } // API name: lifecycle -func (r *PutSettings) Lifecycle(lifecycle *types.IndexSettingsLifecycle) *PutSettings { +func (r *PutSettings) Lifecycle(lifecycle types.IndexSettingsLifecycleVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Lifecycle = lifecycle + r.req.Lifecycle = lifecycle.IndexSettingsLifecycleCaster() return r } // API name: load_fixed_bitset_filters_eagerly func (r *PutSettings) LoadFixedBitsetFiltersEagerly(loadfixedbitsetfilterseagerly bool) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.LoadFixedBitsetFiltersEagerly = &loadfixedbitsetfilterseagerly return r } -// Mapping Enable or disable dynamic mapping for an index. +// Enable or disable dynamic mapping for an index. 
// API name: mapping -func (r *PutSettings) Mapping(mapping *types.MappingLimitSettings) *PutSettings { +func (r *PutSettings) Mapping(mapping types.MappingLimitSettingsVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Mapping = mapping + r.req.Mapping = mapping.MappingLimitSettingsCaster() return r } // API name: max_docvalue_fields_search func (r *PutSettings) MaxDocvalueFieldsSearch(maxdocvaluefieldssearch int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxDocvalueFieldsSearch = &maxdocvaluefieldssearch return r @@ -612,6 +767,11 @@ func (r *PutSettings) MaxDocvalueFieldsSearch(maxdocvaluefieldssearch int) *PutS // API name: max_inner_result_window func (r *PutSettings) MaxInnerResultWindow(maxinnerresultwindow int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxInnerResultWindow = &maxinnerresultwindow return r @@ -619,6 +779,11 @@ func (r *PutSettings) MaxInnerResultWindow(maxinnerresultwindow int) *PutSetting // API name: max_ngram_diff func (r *PutSettings) MaxNgramDiff(maxngramdiff int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxNgramDiff = &maxngramdiff return r @@ -626,6 +791,11 @@ func (r *PutSettings) MaxNgramDiff(maxngramdiff int) *PutSettings { // API name: max_refresh_listeners func (r *PutSettings) MaxRefreshListeners(maxrefreshlisteners int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxRefreshListeners = &maxrefreshlisteners return r @@ -633,6 +803,11 @@ func (r *PutSettings) MaxRefreshListeners(maxrefreshlisteners int) *PutSettings // API name: max_regex_length func (r *PutSettings) 
MaxRegexLength(maxregexlength int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxRegexLength = &maxregexlength return r @@ -640,6 +815,11 @@ func (r *PutSettings) MaxRegexLength(maxregexlength int) *PutSettings { // API name: max_rescore_window func (r *PutSettings) MaxRescoreWindow(maxrescorewindow int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxRescoreWindow = &maxrescorewindow return r @@ -647,6 +827,11 @@ func (r *PutSettings) MaxRescoreWindow(maxrescorewindow int) *PutSettings { // API name: max_result_window func (r *PutSettings) MaxResultWindow(maxresultwindow int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxResultWindow = &maxresultwindow return r @@ -654,6 +839,11 @@ func (r *PutSettings) MaxResultWindow(maxresultwindow int) *PutSettings { // API name: max_script_fields func (r *PutSettings) MaxScriptFields(maxscriptfields int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxScriptFields = &maxscriptfields return r @@ -661,6 +851,11 @@ func (r *PutSettings) MaxScriptFields(maxscriptfields int) *PutSettings { // API name: max_shingle_diff func (r *PutSettings) MaxShingleDiff(maxshinglediff int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxShingleDiff = &maxshinglediff return r @@ -668,6 +863,11 @@ func (r *PutSettings) MaxShingleDiff(maxshinglediff int) *PutSettings { // API name: max_slices_per_scroll func (r *PutSettings) MaxSlicesPerScroll(maxslicesperscroll int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxSlicesPerScroll = 
&maxslicesperscroll return r @@ -675,21 +875,34 @@ func (r *PutSettings) MaxSlicesPerScroll(maxslicesperscroll int) *PutSettings { // API name: max_terms_count func (r *PutSettings) MaxTermsCount(maxtermscount int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxTermsCount = &maxtermscount return r } // API name: merge -func (r *PutSettings) Merge(merge *types.Merge) *PutSettings { +func (r *PutSettings) Merge(merge types.MergeVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Merge = merge + r.req.Merge = merge.MergeCaster() return r } // API name: mode func (r *PutSettings) Mode(mode string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Mode = &mode @@ -698,6 +911,11 @@ func (r *PutSettings) Mode(mode string) *PutSettings { // API name: number_of_replicas func (r *PutSettings) NumberOfReplicas(numberofreplicas string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.NumberOfReplicas = numberofreplicas return r @@ -705,6 +923,11 @@ func (r *PutSettings) NumberOfReplicas(numberofreplicas string) *PutSettings { // API name: number_of_routing_shards func (r *PutSettings) NumberOfRoutingShards(numberofroutingshards int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.NumberOfRoutingShards = &numberofroutingshards return r @@ -712,6 +935,11 @@ func (r *PutSettings) NumberOfRoutingShards(numberofroutingshards int) *PutSetti // API name: number_of_shards func (r *PutSettings) NumberOfShards(numberofshards string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.NumberOfShards 
= numberofshards return r @@ -719,6 +947,11 @@ func (r *PutSettings) NumberOfShards(numberofshards string) *PutSettings { // API name: priority func (r *PutSettings) Priority(priority string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Priority = priority return r @@ -726,133 +959,224 @@ func (r *PutSettings) Priority(priority string) *PutSettings { // API name: provided_name func (r *PutSettings) ProvidedName(name string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ProvidedName = &name return r } // API name: queries -func (r *PutSettings) Queries(queries *types.Queries) *PutSettings { +func (r *PutSettings) Queries(queries types.QueriesVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Queries = queries + r.req.Queries = queries.QueriesCaster() return r } // API name: query_string -func (r *PutSettings) QueryString(querystring *types.SettingsQueryString) *PutSettings { +func (r *PutSettings) QueryString(querystring types.SettingsQueryStringVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.QueryString = querystring + r.req.QueryString = querystring.SettingsQueryStringCaster() return r } // API name: refresh_interval -func (r *PutSettings) RefreshInterval(duration types.Duration) *PutSettings { - r.req.RefreshInterval = duration +func (r *PutSettings) RefreshInterval(duration types.DurationVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RefreshInterval = *duration.DurationCaster() return r } // API name: routing -func (r *PutSettings) Routing(routing *types.IndexRouting) *PutSettings { +func (r *PutSettings) Routing(routing 
types.IndexRoutingVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Routing = routing + r.req.Routing = routing.IndexRoutingCaster() return r } // API name: routing_partition_size -func (r *PutSettings) RoutingPartitionSize(stringifiedinteger types.Stringifiedinteger) *PutSettings { - r.req.RoutingPartitionSize = stringifiedinteger +func (r *PutSettings) RoutingPartitionSize(stringifiedinteger types.StringifiedintegerVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RoutingPartitionSize = *stringifiedinteger.StringifiedintegerCaster() return r } // API name: routing_path func (r *PutSettings) RoutingPath(routingpaths ...string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RoutingPath = make([]string, len(routingpaths)) r.req.RoutingPath = routingpaths return r } // API name: search -func (r *PutSettings) Search(search *types.SettingsSearch) *PutSettings { +func (r *PutSettings) Search(search types.SettingsSearchVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Search = search + r.req.Search = search.SettingsSearchCaster() return r } // API name: settings -func (r *PutSettings) Settings(settings *types.IndexSettings) *PutSettings { +func (r *PutSettings) Settings(settings types.IndexSettingsVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Settings = settings + r.req.Settings = settings.IndexSettingsCaster() return r } -// Similarity Configure custom similarity settings to customize how search results are +// Configure custom similarity settings to customize how search results are // scored. 
// API name: similarity func (r *PutSettings) Similarity(similarity map[string]types.SettingsSimilarity) *PutSettings { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Similarity = similarity + return r +} + +func (r *PutSettings) AddSimilarity(key string, value types.SettingsSimilarityVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.SettingsSimilarity + if r.req.Similarity == nil { + r.req.Similarity = make(map[string]types.SettingsSimilarity) + } else { + tmp = r.req.Similarity + } + tmp[key] = *value.SettingsSimilarityCaster() + + r.req.Similarity = tmp return r } // API name: soft_deletes -func (r *PutSettings) SoftDeletes(softdeletes *types.SoftDeletes) *PutSettings { +func (r *PutSettings) SoftDeletes(softdeletes types.SoftDeletesVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.SoftDeletes = softdeletes + r.req.SoftDeletes = softdeletes.SoftDeletesCaster() return r } // API name: sort -func (r *PutSettings) Sort(sort *types.IndexSegmentSort) *PutSettings { +func (r *PutSettings) Sort(sort types.IndexSegmentSortVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Sort = sort + r.req.Sort = sort.IndexSegmentSortCaster() return r } -// Store The store module allows you to control how index data is stored and accessed +// The store module allows you to control how index data is stored and accessed // on disk. 
// API name: store -func (r *PutSettings) Store(store *types.Storage) *PutSettings { +func (r *PutSettings) Store(store types.StorageVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Store = store + r.req.Store = store.StorageCaster() return r } // API name: time_series -func (r *PutSettings) TimeSeries(timeseries *types.IndexSettingsTimeSeries) *PutSettings { +func (r *PutSettings) TimeSeries(timeseries types.IndexSettingsTimeSeriesVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.TimeSeries = timeseries + r.req.TimeSeries = timeseries.IndexSettingsTimeSeriesCaster() return r } // API name: top_metrics_max_size func (r *PutSettings) TopMetricsMaxSize(topmetricsmaxsize int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TopMetricsMaxSize = &topmetricsmaxsize return r } // API name: translog -func (r *PutSettings) Translog(translog *types.Translog) *PutSettings { +func (r *PutSettings) Translog(translog types.TranslogVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Translog = translog + r.req.Translog = translog.TranslogCaster() return r } // API name: uuid func (r *PutSettings) Uuid(uuid string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Uuid = &uuid return r @@ -860,15 +1184,24 @@ func (r *PutSettings) Uuid(uuid string) *PutSettings { // API name: verified_before_close func (r *PutSettings) VerifiedBeforeClose(verifiedbeforeclose string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.VerifiedBeforeClose = verifiedbeforeclose return r } // API 
name: version -func (r *PutSettings) Version(version *types.IndexVersioning) *PutSettings { +func (r *PutSettings) Version(version types.IndexVersioningVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Version = version + r.req.Version = version.IndexVersioningCaster() return r } diff --git a/typedapi/indices/putsettings/request.go b/typedapi/indices/putsettings/request.go index d2c2c6ab05..83579d32ea 100644 --- a/typedapi/indices/putsettings/request.go +++ b/typedapi/indices/putsettings/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putsettings @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package putsettings // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/put_settings/IndicesPutSettingsRequest.ts#L25-L93 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/put_settings/IndicesPutSettingsRequest.ts#L25-L118 type Request = types.IndexSettings // NewRequest returns a Request diff --git a/typedapi/indices/putsettings/response.go b/typedapi/indices/putsettings/response.go index c9d8200e5a..08f1f90fad 100644 --- a/typedapi/indices/putsettings/response.go +++ b/typedapi/indices/putsettings/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putsettings // Response holds the response body struct for the package putsettings // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/put_settings/IndicesPutSettingsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/put_settings/IndicesPutSettingsResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/puttemplate/put_template.go b/typedapi/indices/puttemplate/put_template.go index 0c9600c6fb..863a88ac35 100644 --- a/typedapi/indices/puttemplate/put_template.go +++ b/typedapi/indices/puttemplate/put_template.go @@ -16,11 +16,40 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Create or update an index template. // Index templates define settings, mappings, and aliases that can be applied // automatically to new indices. +// Elasticsearch applies templates to new indices based on an index pattern that +// matches the index name. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. +// +// Composable templates always take precedence over legacy templates. +// If no composable template matches a new index, matching legacy templates are +// applied according to their order. 
+// +// Index templates are only applied during index creation. +// Changes to index templates do not affect existing indices. +// Settings and mappings specified in create index API requests override any +// settings or mappings specified in an index template. +// +// You can use C-style `/* *\/` block comments in index templates. +// You can include comments anywhere in the request body, except before the +// opening curly bracket. +// +// **Indices matching multiple templates** +// +// Multiple index templates can potentially match an index, in this case, both +// the settings and mappings are merged into the final configuration of the +// index. +// The order of the merging can be controlled using the order parameter, with +// lower order being applied first, and higher orders overriding them. +// NOTE: Multiple matching templates with the same order value will result in a +// non-deterministic merging order. package puttemplate import ( @@ -86,6 +115,35 @@ func NewPutTemplateFunc(tp elastictransport.Interface) NewPutTemplate { // Create or update an index template. // Index templates define settings, mappings, and aliases that can be applied // automatically to new indices. +// Elasticsearch applies templates to new indices based on an index pattern that +// matches the index name. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. +// +// Composable templates always take precedence over legacy templates. +// If no composable template matches a new index, matching legacy templates are +// applied according to their order. +// +// Index templates are only applied during index creation. +// Changes to index templates do not affect existing indices. +// Settings and mappings specified in create index API requests override any +// settings or mappings specified in an index template. 
+// +// You can use C-style `/* *\/` block comments in index templates. +// You can include comments anywhere in the request body, except before the +// opening curly bracket. +// +// **Indices matching multiple templates** +// +// Multiple index templates can potentially match an index, in this case, both +// the settings and mappings are merged into the final configuration of the +// index. +// The order of the merging can be controlled using the order parameter, with +// lower order being applied first, and higher orders overriding them. +// NOTE: Multiple matching templates with the same order value will result in a +// non-deterministic merging order. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates-v1.html func New(tp elastictransport.Interface) *PutTemplate { @@ -95,8 +153,6 @@ func New(tp elastictransport.Interface) *PutTemplate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -385,58 +441,103 @@ func (r *PutTemplate) Pretty(pretty bool) *PutTemplate { return r } -// Aliases Aliases for the index. +// Aliases for the index. 
// API name: aliases func (r *PutTemplate) Aliases(aliases map[string]types.Alias) *PutTemplate { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aliases = aliases + return r +} + +func (r *PutTemplate) AddAlias(key string, value types.AliasVariant) *PutTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Alias + if r.req.Aliases == nil { + r.req.Aliases = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + r.req.Aliases = tmp return r } -// IndexPatterns Array of wildcard expressions used to match the names +// Array of wildcard expressions used to match the names // of indices during creation. // API name: index_patterns func (r *PutTemplate) IndexPatterns(indexpatterns ...string) *PutTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexPatterns = make([]string, len(indexpatterns)) r.req.IndexPatterns = indexpatterns return r } -// Mappings Mapping for fields in the index. +// Mapping for fields in the index. // API name: mappings -func (r *PutTemplate) Mappings(mappings *types.TypeMapping) *PutTemplate { +func (r *PutTemplate) Mappings(mappings types.TypeMappingVariant) *PutTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Mappings = mappings + r.req.Mappings = mappings.TypeMappingCaster() return r } -// Order Order in which Elasticsearch applies this template if index +// Order in which Elasticsearch applies this template if index // matches multiple templates. // // Templates with lower 'order' values are merged first. Templates with higher // 'order' values are merged later, overriding templates with lower values. 
// API name: order func (r *PutTemplate) Order(order int) *PutTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Order = &order return r } -// Settings Configuration options for the index. +// Configuration options for the index. // API name: settings -func (r *PutTemplate) Settings(settings *types.IndexSettings) *PutTemplate { +func (r *PutTemplate) Settings(settings types.IndexSettingsVariant) *PutTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Settings = settings + r.req.Settings = settings.IndexSettingsCaster() return r } -// Version Version number used to manage index templates externally. This number +// Version number used to manage index templates externally. This number // is not automatically generated by Elasticsearch. +// To unset a version, replace the template without specifying one. // API name: version func (r *PutTemplate) Version(versionnumber int64) *PutTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &versionnumber return r diff --git a/typedapi/indices/puttemplate/request.go b/typedapi/indices/puttemplate/request.go index 75ccf3e114..39c8c7b14f 100644 --- a/typedapi/indices/puttemplate/request.go +++ b/typedapi/indices/puttemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package puttemplate @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package puttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/put_template/IndicesPutTemplateRequest.ts#L29-L95 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/put_template/IndicesPutTemplateRequest.ts#L29-L124 type Request struct { // Aliases Aliases for the index. @@ -53,6 +53,7 @@ type Request struct { Settings *types.IndexSettings `json:"settings,omitempty"` // Version Version number used to manage index templates externally. This number // is not automatically generated by Elasticsearch. + // To unset a version, replace the template without specifying one. Version *int64 `json:"version,omitempty"` } diff --git a/typedapi/indices/puttemplate/response.go b/typedapi/indices/puttemplate/response.go index 08b0fbb03e..743ed89cdf 100644 --- a/typedapi/indices/puttemplate/response.go +++ b/typedapi/indices/puttemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package puttemplate // Response holds the response body struct for the package puttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/put_template/IndicesPutTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/put_template/IndicesPutTemplateResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/recovery/recovery.go b/typedapi/indices/recovery/recovery.go index 50d808dfd6..def13209e1 100644 --- a/typedapi/indices/recovery/recovery.go +++ b/typedapi/indices/recovery/recovery.go @@ -16,12 +16,44 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about ongoing and completed shard recoveries for one or -// more indices. -// For data streams, the API returns information for the stream’s backing +// Get index recovery information. +// Get information about ongoing and completed shard recoveries for one or more // indices. +// For data streams, the API returns information for the stream's backing +// indices. +// +// All recoveries, whether ongoing or complete, are kept in the cluster state +// and may be reported on at any time. +// +// Shard recovery is the process of initializing a shard copy, such as restoring +// a primary shard from a snapshot or creating a replica shard from a primary +// shard. 
+// When a shard recovery completes, the recovered shard is available for search +// and indexing. +// +// Recovery automatically occurs during the following processes: +// +// * When creating an index for the first time. +// * When a node rejoins the cluster and starts up any missing primary shard +// copies using the data that it holds in its data path. +// * Creation of new replica shard copies from the primary. +// * Relocation of a shard copy to a different node in the same cluster. +// * A snapshot restore operation. +// * A clone, shrink, or split operation. +// +// You can determine the cause of a shard recovery using the recovery or cat +// recovery APIs. +// +// The index recovery API reports information about completed recoveries only +// for shard copies that currently exist in the cluster. +// It only reports the last recovery for each shard copy and does not report +// historical information about earlier recoveries, nor does it report +// information about the recoveries of shard copies that no longer exist. +// This means that if a shard copy completes a recovery and then Elasticsearch +// relocates it onto a different node then the information about the original +// recovery will not be shown in the recovery API. package recovery import ( @@ -77,10 +109,42 @@ func NewRecoveryFunc(tp elastictransport.Interface) NewRecovery { } } -// Returns information about ongoing and completed shard recoveries for one or -// more indices. -// For data streams, the API returns information for the stream’s backing +// Get index recovery information. +// Get information about ongoing and completed shard recoveries for one or more // indices. +// For data streams, the API returns information for the stream's backing +// indices. +// +// All recoveries, whether ongoing or complete, are kept in the cluster state +// and may be reported on at any time. 
+// +// Shard recovery is the process of initializing a shard copy, such as restoring +// a primary shard from a snapshot or creating a replica shard from a primary +// shard. +// When a shard recovery completes, the recovered shard is available for search +// and indexing. +// +// Recovery automatically occurs during the following processes: +// +// * When creating an index for the first time. +// * When a node rejoins the cluster and starts up any missing primary shard +// copies using the data that it holds in its data path. +// * Creation of new replica shard copies from the primary. +// * Relocation of a shard copy to a different node in the same cluster. +// * A snapshot restore operation. +// * A clone, shrink, or split operation. +// +// You can determine the cause of a shard recovery using the recovery or cat +// recovery APIs. +// +// The index recovery API reports information about completed recoveries only +// for shard copies that currently exist in the cluster. +// It only reports the last recovery for each shard copy and does not report +// historical information about earlier recoveries, nor does it report +// information about the recoveries of shard copies that no longer exist. +// This means that if a shard copy completes a recovery and then Elasticsearch +// relocates it onto a different node then the information about the original +// recovery will not be shown in the recovery API. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-recovery.html func New(tp elastictransport.Interface) *Recovery { diff --git a/typedapi/indices/recovery/response.go b/typedapi/indices/recovery/response.go index 4044aa6f18..506b3ce089 100644 --- a/typedapi/indices/recovery/response.go +++ b/typedapi/indices/recovery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package recovery @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package recovery // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/recovery/IndicesRecoveryResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/recovery/IndicesRecoveryResponse.ts#L24-L27 type Response map[string]types.RecoveryStatus diff --git a/typedapi/indices/refresh/refresh.go b/typedapi/indices/refresh/refresh.go index e8ddb164a2..1e909dbef8 100644 --- a/typedapi/indices/refresh/refresh.go +++ b/typedapi/indices/refresh/refresh.go @@ -16,13 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Refresh an index. // A refresh makes recent operations performed on one or more indices available // for search. // For data streams, the API runs the refresh operation on the stream’s backing // indices. +// +// By default, Elasticsearch periodically refreshes indices every second, but +// only on indices that have received one search request or more in the last 30 +// seconds. +// You can change this default interval with the `index.refresh_interval` +// setting. +// +// Refresh requests are synchronous and do not return a response until the +// refresh operation completes. +// +// Refreshes are resource-intensive. 
+// To ensure good cluster performance, it's recommended to wait for +// Elasticsearch's periodic refresh rather than performing an explicit refresh +// when possible. +// +// If your application workflow indexes documents and then runs a search to +// retrieve the indexed document, it's recommended to use the index API's +// `refresh=wait_for` query parameter option. +// This option ensures the indexing operation waits for a periodic refresh +// before running the search. package refresh import ( @@ -85,6 +105,26 @@ func NewRefreshFunc(tp elastictransport.Interface) NewRefresh { // For data streams, the API runs the refresh operation on the stream’s backing // indices. // +// By default, Elasticsearch periodically refreshes indices every second, but +// only on indices that have received one search request or more in the last 30 +// seconds. +// You can change this default interval with the `index.refresh_interval` +// setting. +// +// Refresh requests are synchronous and do not return a response until the +// refresh operation completes. +// +// Refreshes are resource-intensive. +// To ensure good cluster performance, it's recommended to wait for +// Elasticsearch's periodic refresh rather than performing an explicit refresh +// when possible. +// +// If your application workflow indexes documents and then runs a search to +// retrieve the indexed document, it's recommended to use the index API's +// `refresh=wait_for` query parameter option. +// This option ensures the indexing operation waits for a periodic refresh +// before running the search. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html func New(tp elastictransport.Interface) *Refresh { r := &Refresh{ diff --git a/typedapi/indices/refresh/response.go b/typedapi/indices/refresh/response.go index b833782434..04c2d6e351 100644 --- a/typedapi/indices/refresh/response.go +++ b/typedapi/indices/refresh/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package refresh @@ -26,9 +26,9 @@ import ( // Response holds the response body struct for the package refresh // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/refresh/IndicesRefreshResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/refresh/IndicesRefreshResponse.ts#L22-L24 type Response struct { - Shards_ types.ShardStatistics `json:"_shards"` + Shards_ *types.ShardStatistics `json:"_shards,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go b/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go index 1535e72211..05c0179ada 100644 --- a/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go +++ b/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go @@ -16,9 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Reloads an index's search analyzers and their resources. +// Reload search analyzers. +// Reload an index's search analyzers and their resources. +// For data streams, the API reloads search analyzers and resources for the +// stream's backing indices. +// +// IMPORTANT: After reloading the search analyzers you should clear the request +// cache to make sure it doesn't contain responses derived from the previous +// versions of the analyzer. 
+// +// You can use the reload search analyzers API to pick up changes to synonym +// files used in the `synonym_graph` or `synonym` token filter of a search +// analyzer. +// To be eligible, the token filter must have an `updateable` flag of `true` and +// only be used in search analyzers. +// +// NOTE: This API does not perform a reload for each shard of an index. +// Instead, it performs a reload for each node containing index shards. +// As a result, the total shard count returned by the API can differ from the +// number of index shards. +// Because reloading affects every node with an index shard, it is important to +// update the synonym file on every data node in the cluster--including nodes +// that don't contain a shard replica--before using this API. +// This ensures the synonym file is updated everywhere in the cluster in case +// shards are relocated in the future. package reloadsearchanalyzers import ( @@ -77,7 +100,30 @@ func NewReloadSearchAnalyzersFunc(tp elastictransport.Interface) NewReloadSearch } } -// Reloads an index's search analyzers and their resources. +// Reload search analyzers. +// Reload an index's search analyzers and their resources. +// For data streams, the API reloads search analyzers and resources for the +// stream's backing indices. +// +// IMPORTANT: After reloading the search analyzers you should clear the request +// cache to make sure it doesn't contain responses derived from the previous +// versions of the analyzer. +// +// You can use the reload search analyzers API to pick up changes to synonym +// files used in the `synonym_graph` or `synonym` token filter of a search +// analyzer. +// To be eligible, the token filter must have an `updateable` flag of `true` and +// only be used in search analyzers. +// +// NOTE: This API does not perform a reload for each shard of an index. +// Instead, it performs a reload for each node containing index shards. 
+// As a result, the total shard count returned by the API can differ from the +// number of index shards. +// Because reloading affects every node with an index shard, it is important to +// update the synonym file on every data node in the cluster--including nodes +// that don't contain a shard replica--before using this API. +// This ensures the synonym file is updated everywhere in the cluster in case +// shards are relocated in the future. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-reload-analyzers.html func New(tp elastictransport.Interface) *ReloadSearchAnalyzers { diff --git a/typedapi/indices/reloadsearchanalyzers/response.go b/typedapi/indices/reloadsearchanalyzers/response.go index bcd71db08f..81b206928b 100644 --- a/typedapi/indices/reloadsearchanalyzers/response.go +++ b/typedapi/indices/reloadsearchanalyzers/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package reloadsearchanalyzers @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package reloadsearchanalyzers // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/reload_search_analyzers/ReloadSearchAnalyzersResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/reload_search_analyzers/ReloadSearchAnalyzersResponse.ts#L22-L24 type Response struct { ReloadDetails []types.ReloadDetails `json:"reload_details"` Shards_ types.ShardStatistics `json:"_shards"` diff --git a/typedapi/indices/resolvecluster/resolve_cluster.go b/typedapi/indices/resolvecluster/resolve_cluster.go index 2678b3d6fd..f188d12a0d 
100644 --- a/typedapi/indices/resolvecluster/resolve_cluster.go +++ b/typedapi/indices/resolvecluster/resolve_cluster.go @@ -16,12 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Resolves the specified index expressions to return information about each -// cluster, including -// the local cluster, if included. -// Multiple patterns and remote clusters are supported. +// Resolve the cluster. +// +// Resolve the specified index expressions to return information about each +// cluster, including the local "querying" cluster, if included. +// If no index expression is provided, the API will return information about all +// the remote clusters that are configured on the querying cluster. +// +// This endpoint is useful before doing a cross-cluster search in order to +// determine which remote clusters should be included in a search. +// +// You use the same index expression with this endpoint as you would for +// cross-cluster search. +// Index and cluster exclusions are also supported with this endpoint. +// +// For each cluster in the index expression, information is returned about: +// +// * Whether the querying ("local") cluster is currently connected to each +// remote cluster specified in the index expression. Note that this endpoint +// actively attempts to contact the remote clusters, unlike the `remote/info` +// endpoint. +// * Whether each remote cluster is configured with `skip_unavailable` as `true` +// or `false`. +// * Whether there are any indices, aliases, or data streams on that cluster +// that match the index expression. 
+// * Whether the search is likely to have errors returned when you do the
+// cross-cluster search (including any authorization errors if you do not have
+// permission to query the index).
+// * Cluster version information, including the Elasticsearch server version.
+//
+// For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns
+// information about the local cluster and all remotely configured clusters that
+// start with the alias `cluster*`.
+// Each cluster returns information about whether it has any indices, aliases or
+// data streams that match `my-index-*`.
+//
+// ## Note on backwards compatibility
+// The ability to query without an index expression was added in version 8.18,
+// so when
+// querying remote clusters older than that, the local cluster will send the
+// index
+// expression `dummy*` to those remote clusters. Thus, if any errors occur, you
+// may see a reference
+// to that index expression even though you didn't request it. If it causes a
+// problem, you can
+// instead include an index expression like `*:*` to bypass the issue.
+//
+// ## Advantages of using this endpoint before a cross-cluster search
+//
+// You may want to exclude a cluster or index from a search when:
+//
+// * A remote cluster is not currently connected and is configured with
+// `skip_unavailable=false`. Running a cross-cluster search under those
+// conditions will cause the entire search to fail.
+// * A cluster has no matching indices, aliases or data streams for the index
+// expression (or your user does not have permissions to search them). For
+// example, suppose your index expression is `logs*,remote1:logs*` and the
+// remote1 cluster has no indices, aliases or data streams that match `logs*`.
+// In that case, that cluster will return no results from that cluster if you
+// include it in a cross-cluster search.
+// * The index expression (combined with any query parameters you specify) will +// likely cause an exception to be thrown when you do the search. In these +// cases, the "error" field in the `_resolve/cluster` response will be present. +// (This is also where security/permission errors will be shown.) +// * A remote cluster is an older version that does not support the feature you +// want to use in your search. +// +// ## Test availability of remote clusters +// +// The `remote/info` endpoint is commonly used to test whether the "local" +// cluster (the cluster being queried) is connected to its remote clusters, but +// it does not necessarily reflect whether the remote cluster is available or +// not. +// The remote cluster may be available, while the local cluster is not currently +// connected to it. +// +// You can use the `_resolve/cluster` API to attempt to reconnect to remote +// clusters. +// For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. +// The `connected` field in the response will indicate whether it was +// successful. +// If a connection was (re-)established, this will also cause the `remote/info` +// endpoint to now indicate a connected status. package resolvecluster import ( @@ -66,24 +144,100 @@ type ResolveCluster struct { } // NewResolveCluster type alias for index. -type NewResolveCluster func(name string) *ResolveCluster +type NewResolveCluster func() *ResolveCluster // NewResolveClusterFunc returns a new instance of ResolveCluster with the provided transport. // Used in the index of the library this allows to retrieve every apis in once place. func NewResolveClusterFunc(tp elastictransport.Interface) NewResolveCluster { - return func(name string) *ResolveCluster { + return func() *ResolveCluster { n := New(tp) - n._name(name) - return n } } -// Resolves the specified index expressions to return information about each -// cluster, including -// the local cluster, if included. 
-// Multiple patterns and remote clusters are supported. +// Resolve the cluster. +// +// Resolve the specified index expressions to return information about each +// cluster, including the local "querying" cluster, if included. +// If no index expression is provided, the API will return information about all +// the remote clusters that are configured on the querying cluster. +// +// This endpoint is useful before doing a cross-cluster search in order to +// determine which remote clusters should be included in a search. +// +// You use the same index expression with this endpoint as you would for +// cross-cluster search. +// Index and cluster exclusions are also supported with this endpoint. +// +// For each cluster in the index expression, information is returned about: +// +// * Whether the querying ("local") cluster is currently connected to each +// remote cluster specified in the index expression. Note that this endpoint +// actively attempts to contact the remote clusters, unlike the `remote/info` +// endpoint. +// * Whether each remote cluster is configured with `skip_unavailable` as `true` +// or `false`. +// * Whether there are any indices, aliases, or data streams on that cluster +// that match the index expression. +// * Whether the search is likely to have errors returned when you do the +// cross-cluster search (including any authorization errors if you do not have +// permission to query the index). +// * Cluster version information, including the Elasticsearch server version. +// +// For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns +// information about the local cluster and all remotely configured clusters that +// start with the alias `cluster*`. +// Each cluster returns information about whether it has any indices, aliases or +// data streams that match `my-index-*`. 
+//
+// ## Note on backwards compatibility
+// The ability to query without an index expression was added in version 8.18,
+// so when
+// querying remote clusters older than that, the local cluster will send the
+// index
+// expression `dummy*` to those remote clusters. Thus, if any errors occur, you
+// may see a reference
+// to that index expression even though you didn't request it. If it causes a
+// problem, you can
+// instead include an index expression like `*:*` to bypass the issue.
+//
+// ## Advantages of using this endpoint before a cross-cluster search
+//
+// You may want to exclude a cluster or index from a search when:
+//
+// * A remote cluster is not currently connected and is configured with
+// `skip_unavailable=false`. Running a cross-cluster search under those
+// conditions will cause the entire search to fail.
+// * A cluster has no matching indices, aliases or data streams for the index
+// expression (or your user does not have permissions to search them). For
+// example, suppose your index expression is `logs*,remote1:logs*` and the
+// remote1 cluster has no indices, aliases or data streams that match `logs*`.
+// In that case, that cluster will return no results from that cluster if you
+// include it in a cross-cluster search.
+// * The index expression (combined with any query parameters you specify) will
+// likely cause an exception to be thrown when you do the search. In these
+// cases, the "error" field in the `_resolve/cluster` response will be present.
+// (This is also where security/permission errors will be shown.)
+// * A remote cluster is an older version that does not support the feature you
+// want to use in your search.
+//
+// ## Test availability of remote clusters
+//
+// The `remote/info` endpoint is commonly used to test whether the "local"
+// cluster (the cluster being queried) is connected to its remote clusters, but
+// it does not necessarily reflect whether the remote cluster is available or
+// not.
+// The remote cluster may be available, while the local cluster is not currently +// connected to it. +// +// You can use the `_resolve/cluster` API to attempt to reconnect to remote +// clusters. +// For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. +// The `connected` field in the response will indicate whether it was +// successful. +// If a connection was (re-)established, this will also cause the `remote/info` +// endpoint to now indicate a connected status. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-resolve-cluster-api.html func New(tp elastictransport.Interface) *ResolveCluster { @@ -114,6 +268,13 @@ func (r *ResolveCluster) HttpRequest(ctx context.Context) (*http.Request, error) r.path.Scheme = "http" switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_resolve") + path.WriteString("/") + path.WriteString("cluster") + + method = http.MethodGet case r.paramSet == nameMask: path.WriteString("/") path.WriteString("_resolve") @@ -297,12 +458,16 @@ func (r *ResolveCluster) Header(key, value string) *ResolveCluster { return r } -// Name Comma-separated name(s) or index pattern(s) of the indices, aliases, and data -// streams to resolve. +// Name A comma-separated list of names or index patterns for the indices, aliases, +// and data streams to resolve. // Resources on remote clusters can be specified using the ``:`` // syntax. +// Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. 
+// If no index expression is specified, information about all remote clusters +// configured on the local cluster +// is returned without doing any index matching // API Name: name -func (r *ResolveCluster) _name(name string) *ResolveCluster { +func (r *ResolveCluster) Name(name string) *ResolveCluster { r.paramSet |= nameMask r.name = name @@ -310,11 +475,15 @@ func (r *ResolveCluster) _name(name string) *ResolveCluster { } // AllowNoIndices If false, the request returns an error if any wildcard expression, index -// alias, or _all value targets only missing +// alias, or `_all` value targets only missing // or closed indices. This behavior applies even if the request targets other // open indices. For example, a request -// targeting foo*,bar* returns an error if an index starts with foo but no index -// starts with bar. +// targeting `foo*,bar*` returns an error if an index starts with `foo` but no +// index starts with `bar`. +// NOTE: This option is only supported when specifying an index expression. You +// will get an error if you specify index +// options to the `_resolve/cluster` API endpoint that takes no index +// expression. // API name: allow_no_indices func (r *ResolveCluster) AllowNoIndices(allownoindices bool) *ResolveCluster { r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) @@ -327,6 +496,10 @@ func (r *ResolveCluster) AllowNoIndices(allownoindices bool) *ResolveCluster { // wildcard expressions match hidden data streams. // Supports comma-separated values, such as `open,hidden`. // Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +// NOTE: This option is only supported when specifying an index expression. You +// will get an error if you specify index +// options to the `_resolve/cluster` API endpoint that takes no index +// expression. 
// API name: expand_wildcards func (r *ResolveCluster) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ResolveCluster { tmp := []string{} @@ -338,8 +511,11 @@ func (r *ResolveCluster) ExpandWildcards(expandwildcards ...expandwildcard.Expan return r } -// IgnoreThrottled If true, concrete, expanded or aliased indices are ignored when frozen. -// Defaults to false. +// IgnoreThrottled If true, concrete, expanded, or aliased indices are ignored when frozen. +// NOTE: This option is only supported when specifying an index expression. You +// will get an error if you specify index +// options to the `_resolve/cluster` API endpoint that takes no index +// expression. // API name: ignore_throttled func (r *ResolveCluster) IgnoreThrottled(ignorethrottled bool) *ResolveCluster { r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) @@ -348,7 +524,11 @@ func (r *ResolveCluster) IgnoreThrottled(ignorethrottled bool) *ResolveCluster { } // IgnoreUnavailable If false, the request returns an error if it targets a missing or closed -// index. Defaults to false. +// index. +// NOTE: This option is only supported when specifying an index expression. You +// will get an error if you specify index +// options to the `_resolve/cluster` API endpoint that takes no index +// expression. // API name: ignore_unavailable func (r *ResolveCluster) IgnoreUnavailable(ignoreunavailable bool) *ResolveCluster { r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) @@ -356,6 +536,23 @@ func (r *ResolveCluster) IgnoreUnavailable(ignoreunavailable bool) *ResolveClust return r } +// Timeout The maximum time to wait for remote clusters to respond. +// If a remote cluster does not respond within this timeout period, the API +// response +// will show the cluster as not connected and include an error message that the +// request timed out. 
+// +// The default timeout is unset and the query can take +// as long as the networking layer is configured to wait for remote clusters +// that are +// not responding (typically 30 seconds). +// API name: timeout +func (r *ResolveCluster) Timeout(duration string) *ResolveCluster { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/indices/resolvecluster/response.go b/typedapi/indices/resolvecluster/response.go index 132d161279..9d5a753bc7 100644 --- a/typedapi/indices/resolvecluster/response.go +++ b/typedapi/indices/resolvecluster/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package resolvecluster @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package resolvecluster // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/resolve_cluster/ResolveClusterResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/resolve_cluster/ResolveClusterResponse.ts#L24-L27 type Response map[string]types.ResolveClusterInfo diff --git a/typedapi/indices/resolveindex/resolve_index.go b/typedapi/indices/resolveindex/resolve_index.go index 463da8f5f4..775718c4ad 100644 --- a/typedapi/indices/resolveindex/resolve_index.go +++ b/typedapi/indices/resolveindex/resolve_index.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Resolves the specified name(s) and/or index patterns for indices, aliases, -// and data streams. +// Resolve indices. +// Resolve the names and/or index patterns for indices, aliases, and data +// streams. // Multiple patterns and remote clusters are supported. package resolveindex @@ -79,8 +80,9 @@ func NewResolveIndexFunc(tp elastictransport.Interface) NewResolveIndex { } } -// Resolves the specified name(s) and/or index patterns for indices, aliases, -// and data streams. +// Resolve indices. +// Resolve the names and/or index patterns for indices, aliases, and data +// streams. // Multiple patterns and remote clusters are supported. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-resolve-index-api.html @@ -323,6 +325,27 @@ func (r *ResolveIndex) ExpandWildcards(expandwildcards ...expandwildcard.ExpandW return r } +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *ResolveIndex) IgnoreUnavailable(ignoreunavailable bool) *ResolveIndex { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. 
+// API name: allow_no_indices +func (r *ResolveIndex) AllowNoIndices(allownoindices bool) *ResolveIndex { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/indices/resolveindex/response.go b/typedapi/indices/resolveindex/response.go index 70f49e82d9..5017d1840a 100644 --- a/typedapi/indices/resolveindex/response.go +++ b/typedapi/indices/resolveindex/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package resolveindex @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package resolveindex // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/resolve_index/ResolveIndexResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/resolve_index/ResolveIndexResponse.ts#L22-L28 type Response struct { Aliases []types.ResolveIndexAliasItem `json:"aliases"` DataStreams []types.ResolveIndexDataStreamsItem `json:"data_streams"` diff --git a/typedapi/indices/rollover/request.go b/typedapi/indices/rollover/request.go index 7f28257146..dde7e54587 100644 --- a/typedapi/indices/rollover/request.go +++ b/typedapi/indices/rollover/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package rollover @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package rollover // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/rollover/IndicesRolloverRequest.ts#L29-L100 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/rollover/IndicesRolloverRequest.ts#L29-L147 type Request struct { // Aliases Aliases for the target index. diff --git a/typedapi/indices/rollover/response.go b/typedapi/indices/rollover/response.go index b6879e9d0a..30e4113e47 100644 --- a/typedapi/indices/rollover/response.go +++ b/typedapi/indices/rollover/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package rollover // Response holds the response body struct for the package rollover // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/rollover/IndicesRolloverResponse.ts#L22-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/rollover/IndicesRolloverResponse.ts#L22-L32 type Response struct { Acknowledged bool `json:"acknowledged"` Conditions map[string]bool `json:"conditions"` diff --git a/typedapi/indices/rollover/rollover.go b/typedapi/indices/rollover/rollover.go index faaa082b5a..8d27982985 100644 --- a/typedapi/indices/rollover/rollover.go +++ b/typedapi/indices/rollover/rollover.go @@ -16,10 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Roll over to a new index. -// Creates a new index for a data stream or index alias. +// TIP: It is recommended to use the index lifecycle rollover action to automate +// rollovers. +// +// The rollover API creates a new index for a data stream or index alias. +// The API behavior depends on the rollover target. +// +// **Roll over a data stream** +// +// If you roll over a data stream, the API creates a new write index for the +// stream. +// The stream's previous write index becomes a regular backing index. +// A rollover also increments the data stream's generation. 
+//
+// **Roll over an index alias with a write index**
+//
+// TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a
+// write index to manage time series data.
+// Data streams replace this functionality, require less maintenance, and
+// automatically integrate with data tiers.
+//
+// If an index alias points to multiple indices, one of the indices must be a
+// write index.
+// The rollover API creates a new write index for the alias with
+// `is_write_index` set to `true`.
+// The API also sets `is_write_index` to `false` for the previous write index.
+//
+// **Roll over an index alias with one index**
+//
+// If you roll over an index alias that points to only one index, the API
+// creates a new index for the alias and removes the original index from the
+// alias.
+//
+// NOTE: A rollover creates a new index and is subject to the
+// `wait_for_active_shards` setting.
+//
+// **Increment index names for an alias**
+//
+// When you roll over an index alias, you can specify a name for the new index.
+// If you don't specify a name and the current index ends with `-` and a number,
+// such as `my-index-000001` or `my-index-3`, the new index name increments that
+// number.
+// For example, if you roll over an alias with a current index of
+// `my-index-000001`, the rollover creates a new index named `my-index-000002`.
+// This number is always six characters and zero-padded, regardless of the
+// previous index's name.
+//
+// If you use an index alias for time series data, you can use date math in the
+// index name to track the rollover date.
+// For example, you can create an alias that points to an index named
+// `<my-index-{now/d}-000001>`.
+// If you create the index on May 6, 2099, the index's name is
+// `my-index-2099.05.06-000001`.
+// If you roll over the alias on May 7, 2099, the new index's name is
+// `my-index-2099.05.07-000002`.
package rollover import ( @@ -86,7 +139,60 @@ func NewRolloverFunc(tp elastictransport.Interface) NewRollover { } // Roll over to a new index. -// Creates a new index for a data stream or index alias. +// TIP: It is recommended to use the index lifecycle rollover action to automate +// rollovers. +// +// The rollover API creates a new index for a data stream or index alias. +// The API behavior depends on the rollover target. +// +// **Roll over a data stream** +// +// If you roll over a data stream, the API creates a new write index for the +// stream. +// The stream's previous write index becomes a regular backing index. +// A rollover also increments the data stream's generation. +// +// **Roll over an index alias with a write index** +// +// TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a +// write index to manage time series data. +// Data streams replace this functionality, require less maintenance, and +// automatically integrate with data tiers. +// +// If an index alias points to multiple indices, one of the indices must be a +// write index. +// The rollover API creates a new write index for the alias with +// `is_write_index` set to `true`. +// The API also sets `is_write_index` to `false` for the previous write index. +// +// **Roll over an index alias with one index** +// +// If you roll over an index alias that points to only one index, the API +// creates a new index for the alias and removes the original index from the +// alias. +// +// NOTE: A rollover creates a new index and is subject to the +// `wait_for_active_shards` setting. +// +// **Increment index names for an alias** +// +// When you roll over an index alias, you can specify a name for the new index. +// If you don't specify a name and the current index ends with `-` and a number, +// such as `my-index-000001` or `my-index-3`, the new index name increments that +// number.
+// For example, if you roll over an alias with a current index of +// `my-index-000001`, the rollover creates a new index named `my-index-000002`. +// This number is always six characters and zero-padded, regardless of the +// previous index's name. +// +// If you use an index alias for time series data, you can use date math in the +// index name to track the rollover date. +// For example, you can create an alias that points to an index named +// `<my-index-{now/d}-000001>`. +// If you create the index on May 6, 2099, the index's name is +// `my-index-2099.05.06-000001`. +// If you roll over the alias on May 7, 2099, the new index's name is +// `my-index-2099.05.07-000002`. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html func New(tp elastictransport.Interface) *Rollover { @@ -96,8 +202,6 @@ func New(tp elastictransport.Interface) *Rollover { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -430,17 +534,38 @@ func (r *Rollover) Pretty(pretty bool) *Rollover { return r } -// Aliases Aliases for the target index. +// Aliases for the target index. // Data streams do not support this parameter. // API name: aliases func (r *Rollover) Aliases(aliases map[string]types.Alias) *Rollover { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aliases = aliases + return r +} + +func (r *Rollover) AddAlias(key string, value types.AliasVariant) *Rollover { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Alias + if r.req.Aliases == nil { + tmp = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + r.req.Aliases = tmp return r } -// Conditions Conditions for the rollover. +// Conditions for the rollover.
// If specified, Elasticsearch only performs the rollover if the current index // satisfies these conditions. // If this parameter is not specified, Elasticsearch performs the rollover @@ -450,30 +575,59 @@ func (r *Rollover) Aliases(aliases map[string]types.Alias) *Rollover { // The index will rollover if any `max_*` condition is satisfied and all `min_*` // conditions are satisfied. // API name: conditions -func (r *Rollover) Conditions(conditions *types.RolloverConditions) *Rollover { +func (r *Rollover) Conditions(conditions types.RolloverConditionsVariant) *Rollover { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Conditions = conditions + r.req.Conditions = conditions.RolloverConditionsCaster() return r } -// Mappings Mapping for fields in the index. +// Mapping for fields in the index. // If specified, this mapping can include field names, field data types, and // mapping paramaters. // API name: mappings -func (r *Rollover) Mappings(mappings *types.TypeMapping) *Rollover { +func (r *Rollover) Mappings(mappings types.TypeMappingVariant) *Rollover { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Mappings = mappings + r.req.Mappings = mappings.TypeMappingCaster() return r } -// Settings Configuration options for the index. +// Configuration options for the index. // Data streams do not support this parameter. 
// API name: settings func (r *Rollover) Settings(settings map[string]json.RawMessage) *Rollover { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Settings = settings + return r +} + +func (r *Rollover) AddSetting(key string, value json.RawMessage) *Rollover { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Settings == nil { + tmp = make(map[string]json.RawMessage) + } else { + tmp = r.req.Settings + } + + tmp[key] = value + r.req.Settings = tmp return r } diff --git a/typedapi/indices/segments/response.go b/typedapi/indices/segments/response.go index b3e401eb75..0608c71c22 100644 --- a/typedapi/indices/segments/response.go +++ b/typedapi/indices/segments/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package segments @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package segments // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/segments/IndicesSegmentsResponse.ts#L24-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/segments/IndicesSegmentsResponse.ts#L24-L29 type Response struct { Indices map[string]types.IndexSegment `json:"indices"` Shards_ types.ShardStatistics `json:"_shards"` diff --git a/typedapi/indices/segments/segments.go b/typedapi/indices/segments/segments.go index 5cb4b68988..0720c22509 100644 --- a/typedapi/indices/segments/segments.go +++ b/typedapi/indices/segments/segments.go @@ -16,10 +16,11 @@
// under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns low-level information about the Lucene segments in index shards. -// For data streams, the API returns information about the stream’s backing +// Get index segments. +// Get low-level information about the Lucene segments in index shards. +// For data streams, the API returns information about the stream's backing // indices. package segments @@ -77,8 +78,9 @@ func NewSegmentsFunc(tp elastictransport.Interface) NewSegments { } } -// Returns low-level information about the Lucene segments in index shards. -// For data streams, the API returns information about the stream’s backing +// Get index segments. +// Get low-level information about the Lucene segments in index shards. +// For data streams, the API returns information about the stream's backing // indices. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-segments.html diff --git a/typedapi/indices/shardstores/response.go b/typedapi/indices/shardstores/response.go index 1a1511ae7b..970f671a7f 100644 --- a/typedapi/indices/shardstores/response.go +++ b/typedapi/indices/shardstores/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package shardstores @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package shardstores // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/shard_stores/IndicesShardStoresResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/shard_stores/IndicesShardStoresResponse.ts#L24-L26 type Response struct { Indices map[string]types.IndicesShardStores `json:"indices"` } diff --git a/typedapi/indices/shardstores/shard_stores.go b/typedapi/indices/shardstores/shard_stores.go index 6fff296c7f..5561238a6b 100644 --- a/typedapi/indices/shardstores/shard_stores.go +++ b/typedapi/indices/shardstores/shard_stores.go @@ -16,11 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves store information about replica shards in one or more indices. -// For data streams, the API retrieves store information for the stream’s +// Get index shard stores. +// Get store information about replica shards in one or more indices. +// For data streams, the API retrieves store information for the stream's // backing indices. +// +// The index shard stores API returns the following information: +// +// * The node on which each replica shard exists. +// * The allocation ID for each replica shard. +// * A unique ID for each replica shard. +// * Any errors encountered while opening the shard index or from an earlier +// failure. 
+// +// By default, the API returns store information only for primary shards that +// are unassigned or have one or more unassigned replica shards. package shardstores import ( @@ -78,10 +90,22 @@ func NewShardStoresFunc(tp elastictransport.Interface) NewShardStores { } } -// Retrieves store information about replica shards in one or more indices. -// For data streams, the API retrieves store information for the stream’s +// Get index shard stores. +// Get store information about replica shards in one or more indices. +// For data streams, the API retrieves store information for the stream's // backing indices. // +// The index shard stores API returns the following information: +// +// * The node on which each replica shard exists. +// * The allocation ID for each replica shard. +// * A unique ID for each replica shard. +// * Any errors encountered while opening the shard index or from an earlier +// failure. +// +// By default, the API returns store information only for primary shards that +// are unassigned or have one or more unassigned replica shards. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html func New(tp elastictransport.Interface) *ShardStores { r := &ShardStores{ diff --git a/typedapi/indices/shrink/request.go b/typedapi/indices/shrink/request.go index b3534c64a8..087c03fc8f 100644 --- a/typedapi/indices/shrink/request.go +++ b/typedapi/indices/shrink/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package shrink @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package shrink // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/shrink/IndicesShrinkRequest.ts#L27-L75 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/shrink/IndicesShrinkRequest.ts#L27-L113 type Request struct { // Aliases The key is the alias name. diff --git a/typedapi/indices/shrink/response.go b/typedapi/indices/shrink/response.go index 7dadb1b3ec..f151e001e1 100644 --- a/typedapi/indices/shrink/response.go +++ b/typedapi/indices/shrink/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package shrink // Response holds the response body struct for the package shrink // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/shrink/IndicesShrinkResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/shrink/IndicesShrinkResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` Index string `json:"index"` diff --git a/typedapi/indices/shrink/shrink.go b/typedapi/indices/shrink/shrink.go index 1f7d859612..d0dfb82b11 100644 --- a/typedapi/indices/shrink/shrink.go +++ b/typedapi/indices/shrink/shrink.go @@ -16,9 +16,64 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Shrinks an existing index into a new index with fewer primary shards. +// Shrink an index. +// Shrink an index into a new index with fewer primary shards. +// +// Before you can shrink an index: +// +// * The index must be read-only. +// * A copy of every shard in the index must reside on the same node. +// * The index must have a green health status. +// +// To make shard allocation easier, we recommend you also remove the index's +// replica shards. +// You can later re-add replica shards as part of the shrink operation. +// +// The requested number of primary shards in the target index must be a factor +// of the number of shards in the source index. +// For example an index with 8 primary shards can be shrunk into 4, 2 or 1 +// primary shards or an index with 15 primary shards can be shrunk into 5, 3 or +// 1. +// If the number of shards in the index is a prime number it can only be shrunk +// into a single primary shard +// +// Before shrinking, a (primary or replica) copy of every shard in the index +// +// must be present on the same node. +// +// The current write index on a data stream cannot be shrunk. In order to shrink +// the current write index, the data stream must first be rolled over so that a +// new write index is created and then the previous write index can be shrunk. +// +// A shrink operation: +// +// * Creates a new target index with the same definition as the source index, +// but with a smaller number of primary shards. +// * Hard-links segments from the source index into the target index. If the +// file system does not support hard-linking, then all segments are copied into +// the new index, which is a much more time consuming process. 
Also if using +// multiple data paths, shards on different data paths require a full copy of +// segment files if they are not on the same disk since hardlinks do not work +// across disks. +// * Recovers the target index as though it were a closed index which had just +// been re-opened. Recovers shards to the +// `.routing.allocation.initial_recovery._id` index setting. +// +// IMPORTANT: Indices can only be shrunk if they satisfy the following +// requirements: +// +// * The target index must not exist. +// * The source index must have more primary shards than the target index. +// * The number of primary shards in the target index must be a factor of the +// number of primary shards in the source index. The source index must have more +// primary shards than the target index. +// * The index must not contain more than 2,147,483,519 documents in total +// across all shards that will be shrunk into a single shard on the target index +// as this is the maximum number of docs that can fit into a single shard. +// * The node handling the shrink process must have sufficient free disk space +// to accommodate a second copy of the existing index. package shrink import ( @@ -86,7 +141,62 @@ func NewShrinkFunc(tp elastictransport.Interface) NewShrink { } } -// Shrinks an existing index into a new index with fewer primary shards. +// Shrink an index. +// Shrink an index into a new index with fewer primary shards. +// +// Before you can shrink an index: +// +// * The index must be read-only. +// * A copy of every shard in the index must reside on the same node. +// * The index must have a green health status. +// +// To make shard allocation easier, we recommend you also remove the index's +// replica shards. +// You can later re-add replica shards as part of the shrink operation. +// +// The requested number of primary shards in the target index must be a factor +// of the number of shards in the source index. 
+// For example an index with 8 primary shards can be shrunk into 4, 2 or 1 +// primary shards or an index with 15 primary shards can be shrunk into 5, 3 or +// 1. +// If the number of shards in the index is a prime number it can only be shrunk +// into a single primary shard +// +// Before shrinking, a (primary or replica) copy of every shard in the index +// +// must be present on the same node. +// +// The current write index on a data stream cannot be shrunk. In order to shrink +// the current write index, the data stream must first be rolled over so that a +// new write index is created and then the previous write index can be shrunk. +// +// A shrink operation: +// +// * Creates a new target index with the same definition as the source index, +// but with a smaller number of primary shards. +// * Hard-links segments from the source index into the target index. If the +// file system does not support hard-linking, then all segments are copied into +// the new index, which is a much more time consuming process. Also if using +// multiple data paths, shards on different data paths require a full copy of +// segment files if they are not on the same disk since hardlinks do not work +// across disks. +// * Recovers the target index as though it were a closed index which had just +// been re-opened. Recovers shards to the +// `.routing.allocation.initial_recovery._id` index setting. +// +// IMPORTANT: Indices can only be shrunk if they satisfy the following +// requirements: +// +// * The target index must not exist. +// * The source index must have more primary shards than the target index. +// * The number of primary shards in the target index must be a factor of the +// number of primary shards in the source index. The source index must have more +// primary shards than the target index. 
+// * The index must not contain more than 2,147,483,519 documents in total +// across all shards that will be shrunk into a single shard on the target index +// as this is the maximum number of docs that can fit into a single shard. +// * The node handling the shrink process must have sufficient free disk space +// to accommodate a second copy of the existing index. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html func New(tp elastictransport.Interface) *Shrink { @@ -96,8 +206,6 @@ func New(tp elastictransport.Interface) *Shrink { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -408,21 +516,63 @@ func (r *Shrink) Pretty(pretty bool) *Shrink { return r } -// Aliases The key is the alias name. +// The key is the alias name. // Index alias names support date math. // API name: aliases func (r *Shrink) Aliases(aliases map[string]types.Alias) *Shrink { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aliases = aliases + return r +} + +func (r *Shrink) AddAlias(key string, value types.AliasVariant) *Shrink { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Alias + if r.req.Aliases == nil { + tmp = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + r.req.Aliases = tmp return r } -// Settings Configuration options for the target index. +// Configuration options for the target index.
// API name: settings func (r *Shrink) Settings(settings map[string]json.RawMessage) *Shrink { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Settings = settings + return r +} + +func (r *Shrink) AddSetting(key string, value json.RawMessage) *Shrink { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Settings == nil { + tmp = make(map[string]json.RawMessage) + } else { + tmp = r.req.Settings + } + + tmp[key] = value + r.req.Settings = tmp return r } diff --git a/typedapi/indices/simulateindextemplate/response.go b/typedapi/indices/simulateindextemplate/response.go index 82824d46dd..b3b2f631b8 100644 --- a/typedapi/indices/simulateindextemplate/response.go +++ b/typedapi/indices/simulateindextemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT.
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package simulateindextemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package simulateindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateResponse.ts#L25-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateResponse.ts#L25-L30 type Response struct { Overlapping []types.Overlapping `json:"overlapping,omitempty"` Template types.Template `json:"template"` diff --git a/typedapi/indices/simulateindextemplate/simulate_index_template.go b/typedapi/indices/simulateindextemplate/simulate_index_template.go index 47fd506b5f..0bf0bf0cc9 100644 --- a/typedapi/indices/simulateindextemplate/simulate_index_template.go +++ b/typedapi/indices/simulateindextemplate/simulate_index_template.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Simulate an index. -// Returns the index configuration that would be applied to the specified index -// from an existing index template. +// Get the index configuration that would be applied to the specified index from +// an existing index template. package simulateindextemplate import ( @@ -79,8 +79,8 @@ func NewSimulateIndexTemplateFunc(tp elastictransport.Interface) NewSimulateInde } // Simulate an index. 
-// Returns the index configuration that would be applied to the specified index -// from an existing index template. +// Get the index configuration that would be applied to the specified index from +// an existing index template. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-simulate-index.html func New(tp elastictransport.Interface) *SimulateIndexTemplate { diff --git a/typedapi/indices/simulatetemplate/request.go b/typedapi/indices/simulatetemplate/request.go index 42f7828179..89fdb71cbd 100644 --- a/typedapi/indices/simulatetemplate/request.go +++ b/typedapi/indices/simulatetemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package simulatetemplate @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package simulatetemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts#L27-L120 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts#L27-L131 type Request struct { // AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster diff --git a/typedapi/indices/simulatetemplate/response.go b/typedapi/indices/simulatetemplate/response.go index b954418c08..00c214d0ff 100644 --- a/typedapi/indices/simulatetemplate/response.go +++ b/typedapi/indices/simulatetemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package simulatetemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package simulatetemplate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L26-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L26-L31 type Response struct { Overlapping []types.Overlapping `json:"overlapping,omitempty"` Template types.Template `json:"template"` diff --git a/typedapi/indices/simulatetemplate/simulate_template.go b/typedapi/indices/simulatetemplate/simulate_template.go index b5a23e83dc..bb5111aa9c 100644 --- a/typedapi/indices/simulatetemplate/simulate_template.go +++ b/typedapi/indices/simulatetemplate/simulate_template.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Simulate an index template. -// Returns the index configuration that would be applied by a particular index +// Get the index configuration that would be applied by a particular index // template. package simulatetemplate @@ -82,7 +82,7 @@ func NewSimulateTemplateFunc(tp elastictransport.Interface) NewSimulateTemplate } // Simulate an index template. -// Returns the index configuration that would be applied by a particular index +// Get the index configuration that would be applied by a particular index // template. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-simulate-template.html @@ -93,8 +93,6 @@ func New(tp elastictransport.Interface) *SimulateTemplate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -398,7 +396,7 @@ func (r *SimulateTemplate) Pretty(pretty bool) *SimulateTemplate { return r } -// AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster +// This setting overrides the value of the `action.auto_create_index` cluster // setting. // If set to `true` in a template, then indices can be automatically created // using that template even if auto-creation of indices is disabled via @@ -407,74 +405,112 @@ func (r *SimulateTemplate) Pretty(pretty bool) *SimulateTemplate { // always be explicitly created, and may never be automatically created. // API name: allow_auto_create func (r *SimulateTemplate) AllowAutoCreate(allowautocreate bool) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowAutoCreate = &allowautocreate return r } -// ComposedOf An ordered list of component template names. +// An ordered list of component template names. // Component templates are merged in the order specified, meaning that the last // component template specified has the highest precedence. 
// API name: composed_of func (r *SimulateTemplate) ComposedOf(composedofs ...string) *SimulateTemplate { - r.req.ComposedOf = composedofs + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range composedofs { + + r.req.ComposedOf = append(r.req.ComposedOf, v) + } return r } -// DataStream If this object is included, the template is used to create data streams and +// If this object is included, the template is used to create data streams and // their backing indices. // Supports an empty object. // Data streams require a matching index template with a `data_stream` object. // API name: data_stream -func (r *SimulateTemplate) DataStream(datastream *types.DataStreamVisibility) *SimulateTemplate { +func (r *SimulateTemplate) DataStream(datastream types.DataStreamVisibilityVariant) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DataStream = datastream + r.req.DataStream = datastream.DataStreamVisibilityCaster() return r } -// Deprecated Marks this index template as deprecated. When creating or updating a +// Marks this index template as deprecated. When creating or updating a // non-deprecated index template // that uses deprecated components, Elasticsearch will emit a deprecation // warning. 
// API name: deprecated func (r *SimulateTemplate) Deprecated(deprecated bool) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Deprecated = &deprecated return r } -// IgnoreMissingComponentTemplates The configuration option ignore_missing_component_templates can be used when +// The configuration option ignore_missing_component_templates can be used when // an index template // references a component template that might not exist // API name: ignore_missing_component_templates func (r *SimulateTemplate) IgnoreMissingComponentTemplates(ignoremissingcomponenttemplates ...string) *SimulateTemplate { - r.req.IgnoreMissingComponentTemplates = ignoremissingcomponenttemplates + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ignoremissingcomponenttemplates { + r.req.IgnoreMissingComponentTemplates = append(r.req.IgnoreMissingComponentTemplates, v) + + } return r } -// IndexPatterns Array of wildcard (`*`) expressions used to match the names of data streams +// Array of wildcard (`*`) expressions used to match the names of data streams // and indices during creation. // API name: index_patterns func (r *SimulateTemplate) IndexPatterns(indices ...string) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexPatterns = indices return r } -// Meta_ Optional user metadata about the index template. +// Optional user metadata about the index template. // May have any contents. // This map is not automatically generated by Elasticsearch. 
// API name: _meta -func (r *SimulateTemplate) Meta_(metadata types.Metadata) *SimulateTemplate { - r.req.Meta_ = metadata +func (r *SimulateTemplate) Meta_(metadata types.MetadataVariant) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// Priority Priority to determine index template precedence when a new data stream or +// Priority to determine index template precedence when a new data stream or // index is created. // The index template with the highest priority is chosen. // If no priority is specified the template is treated as though it is of @@ -482,27 +518,40 @@ func (r *SimulateTemplate) Meta_(metadata types.Metadata) *SimulateTemplate { // This number is not automatically generated by Elasticsearch. // API name: priority func (r *SimulateTemplate) Priority(priority int64) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Priority = &priority return r } -// Template Template to be applied. +// Template to be applied. // It may optionally include an `aliases`, `mappings`, or `settings` // configuration. // API name: template -func (r *SimulateTemplate) Template(template *types.IndexTemplateMapping) *SimulateTemplate { +func (r *SimulateTemplate) Template(template types.IndexTemplateMappingVariant) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Template = template + r.req.Template = template.IndexTemplateMappingCaster() return r } -// Version Version number used to manage index templates externally. +// Version number used to manage index templates externally. // This number is not automatically generated by Elasticsearch. 
// API name: version func (r *SimulateTemplate) Version(versionnumber int64) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &versionnumber return r diff --git a/typedapi/indices/split/request.go b/typedapi/indices/split/request.go index 86ba9e82d7..40a34514e5 100644 --- a/typedapi/indices/split/request.go +++ b/typedapi/indices/split/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package split @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package split // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/split/IndicesSplitRequest.ts#L27-L74 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/split/IndicesSplitRequest.ts#L27-L113 type Request struct { // Aliases Aliases for the resulting index. diff --git a/typedapi/indices/split/response.go b/typedapi/indices/split/response.go index d036373fee..2c3c85c6fd 100644 --- a/typedapi/indices/split/response.go +++ b/typedapi/indices/split/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package split // Response holds the response body struct for the package split // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/split/IndicesSplitResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/split/IndicesSplitResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` Index string `json:"index"` diff --git a/typedapi/indices/split/split.go b/typedapi/indices/split/split.go index 3289806a1f..f3d3bbd4e8 100644 --- a/typedapi/indices/split/split.go +++ b/typedapi/indices/split/split.go @@ -16,9 +16,56 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Splits an existing index into a new index with more primary shards. +// Split an index. +// Split an index into a new index with more primary shards. +// * Before you can split an index: +// +// * The index must be read-only. +// * The cluster health status must be green. +// +// You can do make an index read-only with the following request using the add +// index block API: +// +// ``` +// PUT /my_source_index/_block/write +// ``` +// +// The current write index on a data stream cannot be split. +// In order to split the current write index, the data stream must first be +// rolled over so that a new write index is created and then the previous write +// index can be split. 
+// +// The number of times the index can be split (and the number of shards that +// each original shard can be split into) is determined by the +// `index.number_of_routing_shards` setting. +// The number of routing shards specifies the hashing space that is used +// internally to distribute documents across shards with consistent hashing. +// For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x +// 2 x 3) could be split by a factor of 2 or 3. +// +// A split operation: +// +// * Creates a new target index with the same definition as the source index, +// but with a larger number of primary shards. +// * Hard-links segments from the source index into the target index. If the +// file system doesn't support hard-linking, all segments are copied into the +// new index, which is a much more time consuming process. +// * Hashes all documents again, after low level files are created, to delete +// documents that belong to a different shard. +// * Recovers the target index as though it were a closed index which had just +// been re-opened. +// +// IMPORTANT: Indices can only be split if they satisfy the following +// requirements: +// +// * The target index must not exist. +// * The source index must have fewer primary shards than the target index. +// * The number of primary shards in the target index must be a multiple of the +// number of primary shards in the source index. +// * The node handling the split process must have sufficient free disk space to +// accommodate a second copy of the existing index. package split import ( @@ -86,7 +133,54 @@ func NewSplitFunc(tp elastictransport.Interface) NewSplit { } } -// Splits an existing index into a new index with more primary shards. +// Split an index. +// Split an index into a new index with more primary shards. +// * Before you can split an index: +// +// * The index must be read-only. +// * The cluster health status must be green. 
+//
+// You can make an index read-only with the following request using the add
+// index block API:
+//
+// ```
+// PUT /my_source_index/_block/write
+// ```
+//
+// The current write index on a data stream cannot be split.
+// In order to split the current write index, the data stream must first be
+// rolled over so that a new write index is created and then the previous write
+// index can be split.
+//
+// The number of times the index can be split (and the number of shards that
+// each original shard can be split into) is determined by the
+// `index.number_of_routing_shards` setting.
+// The number of routing shards specifies the hashing space that is used
+// internally to distribute documents across shards with consistent hashing.
+// For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x
+// 2 x 3) could be split by a factor of 2 or 3.
+//
+// A split operation:
+//
+// * Creates a new target index with the same definition as the source index,
+// but with a larger number of primary shards.
+// * Hard-links segments from the source index into the target index. If the
+// file system doesn't support hard-linking, all segments are copied into the
+// new index, which is a much more time consuming process.
+// * Hashes all documents again, after low level files are created, to delete
+// documents that belong to a different shard.
+// * Recovers the target index as though it were a closed index which had just
+// been re-opened.
+//
+// IMPORTANT: Indices can only be split if they satisfy the following
+// requirements:
+//
+// * The target index must not exist.
+// * The source index must have fewer primary shards than the target index.
+// * The number of primary shards in the target index must be a multiple of the
+// number of primary shards in the source index.
+// * The node handling the split process must have sufficient free disk space to
+// accommodate a second copy of the existing index.
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-split-index.html func New(tp elastictransport.Interface) *Split { @@ -96,8 +190,6 @@ func New(tp elastictransport.Interface) *Split { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -408,20 +500,62 @@ func (r *Split) Pretty(pretty bool) *Split { return r } -// Aliases Aliases for the resulting index. +// Aliases for the resulting index. // API name: aliases func (r *Split) Aliases(aliases map[string]types.Alias) *Split { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aliases = aliases + return r +} + +func (r *Split) AddAlias(key string, value types.AliasVariant) *Split { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + var tmp map[string]types.Alias + if r.req.Aliases == nil { + r.req.Aliases = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + + r.req.Aliases = tmp return r } -// Settings Configuration options for the target index. +// Configuration options for the target index. 
// API name: settings func (r *Split) Settings(settings map[string]json.RawMessage) *Split { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Settings = settings + return r +} + +func (r *Split) AddSetting(key string, value json.RawMessage) *Split { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Settings == nil { + r.req.Settings = make(map[string]json.RawMessage) + } else { + tmp = r.req.Settings + } + + tmp[key] = value + r.req.Settings = tmp return r } diff --git a/typedapi/indices/stats/response.go b/typedapi/indices/stats/response.go index 49cc49ea46..6d239412be 100644 --- a/typedapi/indices/stats/response.go +++ b/typedapi/indices/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/IndicesStatsResponse.ts#L24-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/IndicesStatsResponse.ts#L24-L30 type Response struct { All_ types.IndicesStats `json:"_all"` Indices map[string]types.IndicesStats `json:"indices,omitempty"` diff --git a/typedapi/indices/stats/stats.go b/typedapi/indices/stats/stats.go index 0b8e766799..3c7a12685d 100644 --- a/typedapi/indices/stats/stats.go +++ b/typedapi/indices/stats/stats.go @@ -16,11 +16,23 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns statistics for one or more indices. -// For data streams, the API retrieves statistics for the stream’s backing +// Get index statistics. +// For data streams, the API retrieves statistics for the stream's backing // indices. +// +// By default, the returned statistics are index-level with `primaries` and +// `total` aggregations. +// `primaries` are the values for only the primary shards. +// `total` are the accumulated values for both primary and replica shards. +// +// To get shard-level statistics, set the `level` parameter to `shards`. +// +// NOTE: When moving to another node, the shard-level statistics for a shard are +// cleared. +// Although the shard is no longer part of the node, that node retains any +// node-level statistics to which the shard contributed. package stats import ( @@ -81,10 +93,22 @@ func NewStatsFunc(tp elastictransport.Interface) NewStats { } } -// Returns statistics for one or more indices. -// For data streams, the API retrieves statistics for the stream’s backing +// Get index statistics. +// For data streams, the API retrieves statistics for the stream's backing // indices. // +// By default, the returned statistics are index-level with `primaries` and +// `total` aggregations. +// `primaries` are the values for only the primary shards. +// `total` are the accumulated values for both primary and replica shards. +// +// To get shard-level statistics, set the `level` parameter to `shards`. +// +// NOTE: When moving to another node, the shard-level statistics for a shard are +// cleared. +// Although the shard is no longer part of the node, that node retains any +// node-level statistics to which the shard contributed. 
+// // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html func New(tp elastictransport.Interface) *Stats { r := &Stats{ diff --git a/typedapi/indices/unfreeze/response.go b/typedapi/indices/unfreeze/response.go index ab47c630a2..93c4f8c9d5 100644 --- a/typedapi/indices/unfreeze/response.go +++ b/typedapi/indices/unfreeze/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package unfreeze // Response holds the response body struct for the package unfreeze // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/unfreeze/IndicesUnfreezeResponse.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/unfreeze/IndicesUnfreezeResponse.ts#L20-L25 type Response struct { Acknowledged bool `json:"acknowledged"` ShardsAcknowledged bool `json:"shards_acknowledged"` diff --git a/typedapi/indices/unfreeze/unfreeze.go b/typedapi/indices/unfreeze/unfreeze.go index e74de0c83e..88fbdab9e0 100644 --- a/typedapi/indices/unfreeze/unfreeze.go +++ b/typedapi/indices/unfreeze/unfreeze.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Unfreezes an index. +// Unfreeze an index. +// When a frozen index is unfrozen, the index goes through the normal recovery +// process and becomes writeable again. 
package unfreeze import ( @@ -77,7 +79,9 @@ func NewUnfreezeFunc(tp elastictransport.Interface) NewUnfreeze { } } -// Unfreezes an index. +// Unfreeze an index. +// When a frozen index is unfrozen, the index goes through the normal recovery +// process and becomes writeable again. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html func New(tp elastictransport.Interface) *Unfreeze { diff --git a/typedapi/indices/updatealiases/request.go b/typedapi/indices/updatealiases/request.go index 0eb788656d..9143281507 100644 --- a/typedapi/indices/updatealiases/request.go +++ b/typedapi/indices/updatealiases/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatealiases @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatealiases // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/update_aliases/IndicesUpdateAliasesRequest.ts#L24-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/update_aliases/IndicesUpdateAliasesRequest.ts#L24-L59 type Request struct { // Actions Actions to perform. diff --git a/typedapi/indices/updatealiases/response.go b/typedapi/indices/updatealiases/response.go index f9ee4bdd84..8b8f4d24bc 100644 --- a/typedapi/indices/updatealiases/response.go +++ b/typedapi/indices/updatealiases/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatealiases // Response holds the response body struct for the package updatealiases // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/update_aliases/IndicesUpdateAliasesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/update_aliases/IndicesUpdateAliasesResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/indices/updatealiases/update_aliases.go b/typedapi/indices/updatealiases/update_aliases.go index 202bc8fbdd..104afd3a55 100644 --- a/typedapi/indices/updatealiases/update_aliases.go +++ b/typedapi/indices/updatealiases/update_aliases.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Create or update an alias. // Adds a data stream or index to an alias. @@ -77,7 +77,7 @@ func NewUpdateAliasesFunc(tp elastictransport.Interface) NewUpdateAliases { // Create or update an alias. // Adds a data stream or index to an alias. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html +// https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-update-aliases func New(tp elastictransport.Interface) *UpdateAliases { r := &UpdateAliases{ transport: tp, @@ -85,8 +85,6 @@ func New(tp elastictransport.Interface) *UpdateAliases { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -356,10 +354,17 @@ func (r *UpdateAliases) Pretty(pretty bool) *UpdateAliases { return r } -// Actions Actions to perform. +// Actions to perform. // API name: actions -func (r *UpdateAliases) Actions(actions ...types.IndicesAction) *UpdateAliases { - r.req.Actions = actions +func (r *UpdateAliases) Actions(actions ...types.IndicesActionVariant) *UpdateAliases { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range actions { + r.req.Actions = append(r.req.Actions, *v.IndicesActionCaster()) + + } return r } diff --git a/typedapi/indices/validatequery/request.go b/typedapi/indices/validatequery/request.go index eeaf1fc3e1..8d74d68a17 100644 --- a/typedapi/indices/validatequery/request.go +++ b/typedapi/indices/validatequery/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package validatequery @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package validatequery // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/validate_query/IndicesValidateQueryRequest.ts#L25-L112 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/validate_query/IndicesValidateQueryRequest.ts#L25-L123 type Request struct { // Query Query in the Lucene query string syntax. diff --git a/typedapi/indices/validatequery/response.go b/typedapi/indices/validatequery/response.go index 0f8c17af08..e379107b06 100644 --- a/typedapi/indices/validatequery/response.go +++ b/typedapi/indices/validatequery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package validatequery @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package validatequery // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L23-L30 type Response struct { Error *string `json:"error,omitempty"` Explanations []types.IndicesValidationExplanation `json:"explanations,omitempty"` diff --git a/typedapi/indices/validatequery/validate_query.go b/typedapi/indices/validatequery/validate_query.go index fa5f0948d3..f1f993d028 100644 --- a/typedapi/indices/validatequery/validate_query.go +++ b/typedapi/indices/validatequery/validate_query.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Validate a query. // Validates a query without running it. @@ -93,8 +93,6 @@ func New(tp elastictransport.Interface) *ValidateQuery { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -486,11 +484,15 @@ func (r *ValidateQuery) Pretty(pretty bool) *ValidateQuery { return r } -// Query Query in the Lucene query string syntax. +// Query in the Lucene query string syntax. 
// API name: query -func (r *ValidateQuery) Query(query *types.Query) *ValidateQuery { +func (r *ValidateQuery) Query(query types.QueryVariant) *ValidateQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } diff --git a/typedapi/inference/chatcompletionunified/chat_completion_unified.go b/typedapi/inference/chatcompletionunified/chat_completion_unified.go new file mode 100644 index 0000000000..388bd9496e --- /dev/null +++ b/typedapi/inference/chatcompletionunified/chat_completion_unified.go @@ -0,0 +1,480 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Perform chat completion inference +package chatcompletionunified + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ChatCompletionUnified struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewChatCompletionUnified type alias for index. +type NewChatCompletionUnified func(inferenceid string) *ChatCompletionUnified + +// NewChatCompletionUnifiedFunc returns a new instance of ChatCompletionUnified with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewChatCompletionUnifiedFunc(tp elastictransport.Interface) NewChatCompletionUnified { + return func(inferenceid string) *ChatCompletionUnified { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform chat completion inference +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/chat-completion-inference-api.html +func New(tp elastictransport.Interface) *ChatCompletionUnified { + r := &ChatCompletionUnified{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ChatCompletionUnified) Raw(raw io.Reader) *ChatCompletionUnified { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ChatCompletionUnified) Request(req *Request) *ChatCompletionUnified { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *ChatCompletionUnified) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ChatCompletionUnified: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("chat_completion") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + path.WriteString("/") + path.WriteString("_stream") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "text/event-stream") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ChatCompletionUnified) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.chat_completion_unified") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.chat_completion_unified") + if reader := instrument.RecordRequestBody(ctx, "inference.chat_completion_unified", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.chat_completion_unified") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ChatCompletionUnified query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a chatcompletionunified.Response +func (r ChatCompletionUnified) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.chat_completion_unified") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ChatCompletionUnified headers map. +func (r *ChatCompletionUnified) Header(key, value string) *ChatCompletionUnified { + r.headers.Set(key, value) + + return r +} + +// InferenceId The inference Id +// API Name: inferenceid +func (r *ChatCompletionUnified) _inferenceid(inferenceid string) *ChatCompletionUnified { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference request to complete. +// API name: timeout +func (r *ChatCompletionUnified) Timeout(duration string) *ChatCompletionUnified { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ChatCompletionUnified) ErrorTrace(errortrace bool) *ChatCompletionUnified { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path
+func (r *ChatCompletionUnified) FilterPath(filterpaths ...string) *ChatCompletionUnified {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *ChatCompletionUnified) Human(human bool) *ChatCompletionUnified {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging.
+// API name: pretty
+func (r *ChatCompletionUnified) Pretty(pretty bool) *ChatCompletionUnified {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The upper bound limit for the number of tokens that can be generated for a
+// completion request.
+// API name: max_completion_tokens
+func (r *ChatCompletionUnified) MaxCompletionTokens(maxcompletiontokens int64) *ChatCompletionUnified {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.MaxCompletionTokens = &maxcompletiontokens
+
+	return r
+}
+
+// A list of objects representing the conversation.
+// API name: messages
+func (r *ChatCompletionUnified) Messages(messages ...types.MessageVariant) *ChatCompletionUnified {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	for _, v := range messages {
+
+		r.req.Messages = append(r.req.Messages, *v.MessageCaster())
+
+	}
+	return r
+}
+
+// The ID of the model to use.
+// API name: model +func (r *ChatCompletionUnified) Model(model string) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Model = &model + + return r +} + +// A sequence of strings to control when the model should stop generating +// additional tokens. +// API name: stop +func (r *ChatCompletionUnified) Stop(stops ...string) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range stops { + + r.req.Stop = append(r.req.Stop, v) + + } + return r +} + +// The sampling temperature to use. +// API name: temperature +func (r *ChatCompletionUnified) Temperature(temperature float32) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Temperature = &temperature + + return r +} + +// Controls which tool is called by the model. +// API name: tool_choice +func (r *ChatCompletionUnified) ToolChoice(completiontooltype types.CompletionToolTypeVariant) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ToolChoice = *completiontooltype.CompletionToolTypeCaster() + + return r +} + +// A list of tools that the model can call. +// API name: tools +func (r *ChatCompletionUnified) Tools(tools ...types.CompletionToolVariant) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range tools { + + r.req.Tools = append(r.req.Tools, *v.CompletionToolCaster()) + + } + return r +} + +// Nucleus sampling, an alternative to sampling with temperature. 
+// API name: top_p +func (r *ChatCompletionUnified) TopP(topp float32) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TopP = &topp + + return r +} diff --git a/typedapi/inference/chatcompletionunified/request.go b/typedapi/inference/chatcompletionunified/request.go new file mode 100644 index 0000000000..6b01bd30a1 --- /dev/null +++ b/typedapi/inference/chatcompletionunified/request.go @@ -0,0 +1,205 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package chatcompletionunified + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package chatcompletionunified +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/chat_completion_unified/UnifiedRequest.ts#L26-L87 +type Request struct { + + // MaxCompletionTokens The upper bound limit for the number of tokens that can be generated for a + // completion request. + MaxCompletionTokens *int64 `json:"max_completion_tokens,omitempty"` + // Messages A list of objects representing the conversation. + Messages []types.Message `json:"messages"` + // Model The ID of the model to use. + Model *string `json:"model,omitempty"` + // Stop A sequence of strings to control when the model should stop generating + // additional tokens. + Stop []string `json:"stop,omitempty"` + // Temperature The sampling temperature to use. + Temperature *float32 `json:"temperature,omitempty"` + // ToolChoice Controls which tool is called by the model. + ToolChoice types.CompletionToolType `json:"tool_choice,omitempty"` + // Tools A list of tools that the model can call. + Tools []types.CompletionTool `json:"tools,omitempty"` + // TopP Nucleus sampling, an alternative to sampling with temperature. 
+ TopP *float32 `json:"top_p,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Chatcompletionunified request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_completion_tokens": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxCompletionTokens", err) + } + s.MaxCompletionTokens = &value + case float64: + f := int64(v) + s.MaxCompletionTokens = &f + } + + case "messages": + if err := dec.Decode(&s.Messages); err != nil { + return fmt.Errorf("%s | %w", "Messages", err) + } + + case "model": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Model", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Model = &o + + case "stop": + if err := dec.Decode(&s.Stop); err != nil { + return fmt.Errorf("%s | %w", "Stop", err) + } + + case "temperature": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Temperature", err) + } + f := float32(value) + s.Temperature = &f + case float64: + f := float32(v) + s.Temperature = &f + } + + case "tool_choice": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "ToolChoice", err) + 
} + keyDec := json.NewDecoder(bytes.NewReader(message)) + toolchoice_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "ToolChoice", err) + } + + switch t { + + case "function", "type": + o := types.NewCompletionToolChoice() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ToolChoice", err) + } + s.ToolChoice = o + break toolchoice_field + + } + } + if s.ToolChoice == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.ToolChoice); err != nil { + return fmt.Errorf("%s | %w", "ToolChoice", err) + } + } + + case "tools": + if err := dec.Decode(&s.Tools); err != nil { + return fmt.Errorf("%s | %w", "Tools", err) + } + + case "top_p": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "TopP", err) + } + f := float32(value) + s.TopP = &f + case float64: + f := float32(v) + s.TopP = &f + } + + } + } + return nil +} diff --git a/typedapi/inference/chatcompletionunified/response.go b/typedapi/inference/chatcompletionunified/response.go new file mode 100644 index 0000000000..0485274ded --- /dev/null +++ b/typedapi/inference/chatcompletionunified/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package chatcompletionunified + +// Response holds the response body struct for the package chatcompletionunified +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/chat_completion_unified/UnifiedResponse.ts#L22-L24 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/inference/completion/completion.go b/typedapi/inference/completion/completion.go new file mode 100644 index 0000000000..a0cc713211 --- /dev/null +++ b/typedapi/inference/completion/completion.go @@ -0,0 +1,393 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Perform completion inference on the service +package completion + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Completion struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCompletion type alias for index. +type NewCompletion func(inferenceid string) *Completion + +// NewCompletionFunc returns a new instance of Completion with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewCompletionFunc(tp elastictransport.Interface) NewCompletion { + return func(inferenceid string) *Completion { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform completion inference on the service +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/post-inference-api.html +func New(tp elastictransport.Interface) *Completion { + r := &Completion{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Completion) Raw(raw io.Reader) *Completion { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Completion) Request(req *Request) *Completion { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Completion) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Completion: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("completion") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Completion) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.completion") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.completion") + if reader := instrument.RecordRequestBody(ctx, "inference.completion", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.completion") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Completion query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a completion.Response +func (r Completion) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.completion") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + 
err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Completion headers map. +func (r *Completion) Header(key, value string) *Completion { + r.headers.Set(key, value) + + return r +} + +// InferenceId The inference Id +// API Name: inferenceid +func (r *Completion) _inferenceid(inferenceid string) *Completion { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference request to complete. +// API name: timeout +func (r *Completion) Timeout(duration string) *Completion { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Completion) ErrorTrace(errortrace bool) *Completion { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path
+func (r *Completion) FilterPath(filterpaths ...string) *Completion {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *Completion) Human(human bool) *Completion {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging.
+// API name: pretty
+func (r *Completion) Pretty(pretty bool) *Completion {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// Inference input.
+// Either a string or an array of strings.
+// API name: input +func (r *Completion) Input(inputs ...string) *Completion { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Optional task settings +// API name: task_settings +func (r *Completion) TaskSettings(tasksettings json.RawMessage) *Completion { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/typedapi/inference/inference/request.go b/typedapi/inference/completion/request.go similarity index 73% rename from typedapi/inference/inference/request.go rename to typedapi/inference/completion/request.go index 004248b704..9d8d011b65 100644 --- a/typedapi/inference/inference/request.go +++ b/typedapi/inference/completion/request.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -package inference +package completion import ( "bytes" @@ -26,20 +26,16 @@ import ( "errors" "fmt" "io" - "strconv" ) -// Request holds the request body struct for the package inference +// Request holds the request body struct for the package completion // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/inference/InferenceRequest.ts#L26-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/completion/CompletionRequest.ts#L25-L63 type Request struct { // Input Inference input. // Either a string or an array of strings. 
Input []string `json:"input"` - // Query Query input, required for rerank task. - // Not required for other tasks. - Query *string `json:"query,omitempty"` // TaskSettings Optional task settings TaskSettings json.RawMessage `json:"task_settings,omitempty"` } @@ -57,7 +53,7 @@ func (r *Request) FromJSON(data string) (*Request, error) { err := json.Unmarshal([]byte(data), &req) if err != nil { - return nil, fmt.Errorf("could not deserialise json into Inference request: %w", err) + return nil, fmt.Errorf("could not deserialise json into Completion request: %w", err) } return &req, nil @@ -93,18 +89,6 @@ func (s *Request) UnmarshalJSON(data []byte) error { } } - case "query": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Query", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Query = &o - case "task_settings": if err := dec.Decode(&s.TaskSettings); err != nil { return fmt.Errorf("%s | %w", "TaskSettings", err) diff --git a/typedapi/inference/completion/response.go b/typedapi/inference/completion/response.go new file mode 100644 index 0000000000..7757a0317c --- /dev/null +++ b/typedapi/inference/completion/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package completion + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package completion +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/completion/CompletionResponse.ts#L22-L24 + +type Response []types.CompletionResult + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/inference/delete/delete.go b/typedapi/inference/delete/delete.go index 7170afc0b0..b590f7c312 100644 --- a/typedapi/inference/delete/delete.go +++ b/typedapi/inference/delete/delete.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete an inference endpoint package delete @@ -317,7 +317,7 @@ func (r *Delete) TaskType(tasktype string) *Delete { return r } -// InferenceId The inference Id +// InferenceId The inference identifier. // API Name: inferenceid func (r *Delete) _inferenceid(inferenceid string) *Delete { r.paramSet |= inferenceidMask @@ -326,8 +326,8 @@ func (r *Delete) _inferenceid(inferenceid string) *Delete { return r } -// DryRun When true, the endpoint is not deleted, and a list of ingest processors which -// reference this endpoint is returned +// DryRun When true, the endpoint is not deleted and a list of ingest processors which +// reference this endpoint is returned. 
// API name: dry_run func (r *Delete) DryRun(dryrun bool) *Delete { r.values.Set("dry_run", strconv.FormatBool(dryrun)) @@ -336,7 +336,7 @@ func (r *Delete) DryRun(dryrun bool) *Delete { } // Force When true, the inference endpoint is forcefully deleted even if it is still -// being used by ingest processors or semantic text fields +// being used by ingest processors or semantic text fields. // API name: force func (r *Delete) Force(force bool) *Delete { r.values.Set("force", strconv.FormatBool(force)) diff --git a/typedapi/inference/delete/response.go b/typedapi/inference/delete/response.go index 48e7a23325..5add0b2870 100644 --- a/typedapi/inference/delete/response.go +++ b/typedapi/inference/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/delete/DeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/delete/DeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/inference/get/get.go b/typedapi/inference/get/get.go index 004d141a4f..212490c59c 100644 --- a/typedapi/inference/get/get.go +++ b/typedapi/inference/get/get.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get an inference endpoint package get diff --git a/typedapi/inference/get/response.go b/typedapi/inference/get/response.go index 52b5547969..28c015959c 100644 --- a/typedapi/inference/get/response.go +++ b/typedapi/inference/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/get/GetResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/get/GetResponse.ts#L22-L26 type Response struct { Endpoints []types.InferenceEndpointInfo `json:"endpoints"` } diff --git a/typedapi/inference/put/put.go b/typedapi/inference/put/put.go index a8099b92e9..0920c2c625 100644 --- a/typedapi/inference/put/put.go +++ b/typedapi/inference/put/put.go @@ -16,9 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Create an inference endpoint +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Create an inference endpoint. +// When you create an inference endpoint, the associated machine learning model +// is automatically deployed if it is not already running. 
+// After creating the endpoint, wait for the model deployment to complete before +// using it. +// To verify the deployment status, use the get trained model statistics API. +// Look for `"state": "fully_allocated"` in the response and ensure that the +// `"allocation_count"` matches the `"target_allocation_count"`. +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, +// Anthropic, Watsonx.ai, or Hugging Face. +// For built-in models and models uploaded through Eland, the inference APIs +// offer an alternative way to use and manage trained models. +// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. package put import ( @@ -84,7 +103,26 @@ func NewPutFunc(tp elastictransport.Interface) NewPut { } } -// Create an inference endpoint +// Create an inference endpoint. +// When you create an inference endpoint, the associated machine learning model +// is automatically deployed if it is not already running. +// After creating the endpoint, wait for the model deployment to complete before +// using it. +// To verify the deployment status, use the get trained model statistics API. +// Look for `"state": "fully_allocated"` in the response and ensure that the +// `"allocation_count"` matches the `"target_allocation_count"`. +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. 
+// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, +// Anthropic, Watsonx.ai, or Hugging Face. +// For built-in models and models uploaded through Eland, the inference APIs +// offer an alternative way to use and manage trained models. +// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-inference-api.html func New(tp elastictransport.Interface) *Put { @@ -94,8 +132,6 @@ func New(tp elastictransport.Interface) *Put { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -386,26 +422,53 @@ func (r *Put) Pretty(pretty bool) *Put { return r } -// Service The service type +// Chunking configuration object +// API name: chunking_settings +func (r *Put) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The service type // API name: service func (r *Put) Service(service string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Service = service return r } -// ServiceSettings Settings specific to the service +// Settings specific to the service // API name: service_settings func (r *Put) ServiceSettings(servicesettings json.RawMessage) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ServiceSettings = servicesettings 
return r } -// TaskSettings Task settings specific to the service and task type +// Task settings specific to the service and task type // API name: task_settings func (r *Put) TaskSettings(tasksettings json.RawMessage) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TaskSettings = tasksettings return r diff --git a/typedapi/inference/put/request.go b/typedapi/inference/put/request.go index 960035ca13..912a9bae7b 100644 --- a/typedapi/inference/put/request.go +++ b/typedapi/inference/put/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package put @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/put/PutRequest.ts#L25-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/put/PutRequest.ts#L25-L65 type Request = types.InferenceEndpoint // NewRequest returns a Request diff --git a/typedapi/inference/put/response.go b/typedapi/inference/put/response.go index 2683c7c899..f86093014a 100644 --- a/typedapi/inference/put/response.go +++ b/typedapi/inference/put/response.go @@ -16,21 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package put import ( "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tasktype" ) // Response holds the response body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/put/PutResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/put/PutResponse.ts#L22-L24 type Response struct { + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` // InferenceId The inference Id InferenceId string `json:"inference_id"` // Service The service type @@ -38,7 +41,7 @@ type Response struct { // ServiceSettings Settings specific to the service ServiceSettings json.RawMessage `json:"service_settings"` // TaskSettings Task settings specific to the service and task type - TaskSettings json.RawMessage `json:"task_settings"` + TaskSettings json.RawMessage `json:"task_settings,omitempty"` // TaskType The task type TaskType tasktype.TaskType `json:"task_type"` } diff --git a/typedapi/inference/putopenai/put_openai.go b/typedapi/inference/putopenai/put_openai.go new file mode 100644 index 0000000000..30d9e8bb81 --- /dev/null +++ b/typedapi/inference/putopenai/put_openai.go @@ -0,0 +1,458 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Create an OpenAI inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `openai` +// service. +// +// When you create an inference endpoint, the associated machine learning model +// is automatically deployed if it is not already running. +// After creating the endpoint, wait for the model deployment to complete before +// using it. +// To verify the deployment status, use the get trained model statistics API. +// Look for `"state": "fully_allocated"` in the response and ensure that the +// `"allocation_count"` matches the `"target_allocation_count"`. +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. 
+package putopenai + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/servicetype" +) + +const ( + tasktypeMask = iota + 1 + + openaiinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutOpenai struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + openaiinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutOpenai type alias for index. +type NewPutOpenai func(tasktype, openaiinferenceid string) *PutOpenai + +// NewPutOpenaiFunc returns a new instance of PutOpenai with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutOpenaiFunc(tp elastictransport.Interface) NewPutOpenai { + return func(tasktype, openaiinferenceid string) *PutOpenai { + n := New(tp) + + n._tasktype(tasktype) + + n._openaiinferenceid(openaiinferenceid) + + return n + } +} + +// Create an OpenAI inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `openai` +// service. +// +// When you create an inference endpoint, the associated machine learning model +// is automatically deployed if it is not already running. +// After creating the endpoint, wait for the model deployment to complete before +// using it. +// To verify the deployment status, use the get trained model statistics API. 
+// Look for `"state": "fully_allocated"` in the response and ensure that the +// `"allocation_count"` matches the `"target_allocation_count"`. +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-openai.html +func New(tp elastictransport.Interface) *PutOpenai { + r := &PutOpenai{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutOpenai) Raw(raw io.Reader) *PutOpenai { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutOpenai) Request(req *Request) *PutOpenai { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutOpenai) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutOpenai: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|openaiinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "openaiinferenceid", r.openaiinferenceid) + } + path.WriteString(r.openaiinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an 
http.Response. +func (r PutOpenai) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_openai") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_openai") + if reader := instrument.RecordRequestBody(ctx, "inference.put_openai", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_openai") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutOpenai query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putopenai.Response +func (r PutOpenai) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_openai") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if 
res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutOpenai headers map. +func (r *PutOpenai) Header(key, value string) *PutOpenai { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// NOTE: The `chat_completion` task type only supports streaming and only +// through the _stream API. +// API Name: tasktype +func (r *PutOpenai) _tasktype(tasktype string) *PutOpenai { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// OpenaiInferenceId The unique identifier of the inference endpoint. +// API Name: openaiinferenceid +func (r *PutOpenai) _openaiinferenceid(openaiinferenceid string) *PutOpenai { + r.paramSet |= openaiinferenceidMask + r.openaiinferenceid = openaiinferenceid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutOpenai) ErrorTrace(errortrace bool) *PutOpenai { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *PutOpenai) FilterPath(filterpaths ...string) *PutOpenai { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutOpenai) Human(human bool) *PutOpenai { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutOpenai) Pretty(pretty bool) *PutOpenai { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The chunking configuration object. +// API name: chunking_settings +func (r *PutOpenai) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutOpenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The type of service supported for the specified task type. In this case, +// `openai`. +// API name: service +func (r *PutOpenai) Service(service servicetype.ServiceType) *PutOpenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `openai` service. 
+// API name: service_settings +func (r *PutOpenai) ServiceSettings(servicesettings types.OpenAIServiceSettingsVariant) *PutOpenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.OpenAIServiceSettingsCaster() + + return r +} + +// Settings to configure the inference task. +// These settings are specific to the task type you specified. +// API name: task_settings +func (r *PutOpenai) TaskSettings(tasksettings types.OpenAITaskSettingsVariant) *PutOpenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings.OpenAITaskSettingsCaster() + + return r +} diff --git a/typedapi/inference/putopenai/request.go b/typedapi/inference/putopenai/request.go new file mode 100644 index 0000000000..c719c3ec16 --- /dev/null +++ b/typedapi/inference/putopenai/request.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package putopenai + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/servicetype" +) + +// Request holds the request body struct for the package putopenai +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/put_openai/PutOpenAiRequest.ts#L28-L82 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `openai`. + Service servicetype.ServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `openai` service. + ServiceSettings types.OpenAIServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type you specified. + TaskSettings *types.OpenAITaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putopenai request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/inference/putopenai/response.go b/typedapi/inference/putopenai/response.go new file mode 100644 index 0000000000..39fbfde71d --- /dev/null +++ b/typedapi/inference/putopenai/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package putopenai + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tasktype" +) + +// Response holds the response body struct for the package putopenai +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/put_openai/PutOpenAiResponse.ts#L22-L24 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktype.TaskType 
`json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/inference/putwatsonx/put_watsonx.go b/typedapi/inference/putwatsonx/put_watsonx.go new file mode 100644 index 0000000000..c856a1c542 --- /dev/null +++ b/typedapi/inference/putwatsonx/put_watsonx.go @@ -0,0 +1,438 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Create a Watsonx inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `watsonxai` service. +// You need an IBM Cloud Databases for Elasticsearch deployment to use the +// `watsonxai` inference service. +// You can provision one through the IBM catalog, the Cloud Databases CLI +// plug-in, the Cloud Databases API, or Terraform. +// +// When you create an inference endpoint, the associated machine learning model +// is automatically deployed if it is not already running. +// After creating the endpoint, wait for the model deployment to complete before +// using it. 
+// To verify the deployment status, use the get trained model statistics API. +// Look for `"state": "fully_allocated"` in the response and ensure that the +// `"allocation_count"` matches the `"target_allocation_count"`. +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. +package putwatsonx + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/servicetype" +) + +const ( + tasktypeMask = iota + 1 + + watsonxinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutWatsonx struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + watsonxinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutWatsonx type alias for index. +type NewPutWatsonx func(tasktype, watsonxinferenceid string) *PutWatsonx + +// NewPutWatsonxFunc returns a new instance of PutWatsonx with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutWatsonxFunc(tp elastictransport.Interface) NewPutWatsonx { + return func(tasktype, watsonxinferenceid string) *PutWatsonx { + n := New(tp) + + n._tasktype(tasktype) + + n._watsonxinferenceid(watsonxinferenceid) + + return n + } +} + +// Create a Watsonx inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `watsonxai` service. 
+// You need an IBM Cloud Databases for Elasticsearch deployment to use the +// `watsonxai` inference service. +// You can provision one through the IBM catalog, the Cloud Databases CLI +// plug-in, the Cloud Databases API, or Terraform. +// +// When you create an inference endpoint, the associated machine learning model +// is automatically deployed if it is not already running. +// After creating the endpoint, wait for the model deployment to complete before +// using it. +// To verify the deployment status, use the get trained model statistics API. +// Look for `"state": "fully_allocated"` in the response and ensure that the +// `"allocation_count"` matches the `"target_allocation_count"`. +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-watsonx-ai.html +func New(tp elastictransport.Interface) *PutWatsonx { + r := &PutWatsonx{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutWatsonx) Raw(raw io.Reader) *PutWatsonx { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutWatsonx) Request(req *Request) *PutWatsonx { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutWatsonx) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutWatsonx: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|watsonxinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "watsonxinferenceid", r.watsonxinferenceid) + } + path.WriteString(r.watsonxinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns 
an http.Response. +func (r PutWatsonx) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_watsonx") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_watsonx") + if reader := instrument.RecordRequestBody(ctx, "inference.put_watsonx", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_watsonx") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutWatsonx query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putwatsonx.Response +func (r PutWatsonx) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_watsonx") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if 
res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutWatsonx headers map. +func (r *PutWatsonx) Header(key, value string) *PutWatsonx { + r.headers.Set(key, value) + + return r +} + +// TaskType The task type. +// The only valid task type for the model to perform is `text_embedding`. +// API Name: tasktype +func (r *PutWatsonx) _tasktype(tasktype string) *PutWatsonx { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// WatsonxInferenceId The unique identifier of the inference endpoint. +// API Name: watsonxinferenceid +func (r *PutWatsonx) _watsonxinferenceid(watsonxinferenceid string) *PutWatsonx { + r.paramSet |= watsonxinferenceidMask + r.watsonxinferenceid = watsonxinferenceid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutWatsonx) ErrorTrace(errortrace bool) *PutWatsonx { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *PutWatsonx) FilterPath(filterpaths ...string) *PutWatsonx { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutWatsonx) Human(human bool) *PutWatsonx { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutWatsonx) Pretty(pretty bool) *PutWatsonx { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The type of service supported for the specified task type. In this case, +// `watsonxai`. +// API name: service +func (r *PutWatsonx) Service(service servicetype.ServiceType) *PutWatsonx { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `watsonxai` service. 
+// API name: service_settings +func (r *PutWatsonx) ServiceSettings(servicesettings types.WatsonxServiceSettingsVariant) *PutWatsonx { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.WatsonxServiceSettingsCaster() + + return r +} diff --git a/typedapi/inference/putwatsonx/request.go b/typedapi/inference/putwatsonx/request.go new file mode 100644 index 0000000000..3d5157d8f1 --- /dev/null +++ b/typedapi/inference/putwatsonx/request.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package putwatsonx + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/servicetype" +) + +// Request holds the request body struct for the package putwatsonx +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/put_watsonx/PutWatsonxRequest.ts#L24-L70 +type Request struct { + + // Service The type of service supported for the specified task type. In this case, + // `watsonxai`. + Service servicetype.ServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `watsonxai` service. + ServiceSettings types.WatsonxServiceSettings `json:"service_settings"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putwatsonx request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/inference/putwatsonx/response.go b/typedapi/inference/putwatsonx/response.go new file mode 100644 index 0000000000..a5574fe48b --- /dev/null +++ b/typedapi/inference/putwatsonx/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package putwatsonx + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tasktype" +) + +// Response holds the response body struct for the package putwatsonx +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/put_watsonx/PutWatsonxResponse.ts#L22-L24 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktype.TaskType `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/inference/rerank/request.go b/typedapi/inference/rerank/request.go new file mode 100644 index 0000000000..ca6ba3d311 --- /dev/null +++ b/typedapi/inference/rerank/request.go @@ -0,0 +1,121 @@ +// 
Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package rerank + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Request holds the request body struct for the package rerank +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/rerank/RerankRequest.ts#L25-L72 +type Request struct { + + // Input The text on which you want to perform the inference task. + // It can be a single string or an array. + // + // > info + // > Inference endpoints for the `completion` task type currently only support a + // single string as input. + Input []string `json:"input"` + // Query Query input. + Query string `json:"query"` + // TaskSettings Task settings for the individual inference request. + // These settings are specific to the task type you specified and override the + // task settings specified when initializing the service. 
+ TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Rerank request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + s.Input = append(s.Input, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + } + } + return nil +} diff --git a/typedapi/inference/rerank/rerank.go b/typedapi/inference/rerank/rerank.go new file mode 100644 index 0000000000..47eec80077 --- /dev/null +++ b/typedapi/inference/rerank/rerank.go @@ -0,0 +1,412 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Perform rereanking inference on the service +package rerank + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Rerank struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRerank type alias for index. +type NewRerank func(inferenceid string) *Rerank + +// NewRerankFunc returns a new instance of Rerank with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewRerankFunc(tp elastictransport.Interface) NewRerank { + return func(inferenceid string) *Rerank { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform rereanking inference on the service +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/post-inference-api.html +func New(tp elastictransport.Interface) *Rerank { + r := &Rerank{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Rerank) Raw(raw io.Reader) *Rerank { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Rerank) Request(req *Request) *Rerank { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Rerank) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Rerank: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("rerank") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Rerank) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.rerank") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.rerank") + if reader := instrument.RecordRequestBody(ctx, "inference.rerank", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.rerank") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Rerank query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a rerank.Response +func (r Rerank) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.rerank") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = 
json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Rerank headers map. +func (r *Rerank) Header(key, value string) *Rerank { + r.headers.Set(key, value) + + return r +} + +// InferenceId The unique identifier for the inference endpoint. +// API Name: inferenceid +func (r *Rerank) _inferenceid(inferenceid string) *Rerank { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout The amount of time to wait for the inference request to complete. +// API name: timeout +func (r *Rerank) Timeout(duration string) *Rerank { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Rerank) ErrorTrace(errortrace bool) *Rerank { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *Rerank) FilterPath(filterpaths ...string) *Rerank { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Rerank) Human(human bool) *Rerank { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Rerank) Pretty(pretty bool) *Rerank { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The text on which you want to perform the inference task. +// It can be a single string or an array. +// +// > info +// > Inference endpoints for the `completion` task type currently only support a +// single string as input. +// API name: input +func (r *Rerank) Input(inputs ...string) *Rerank { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Query input. +// API name: query +func (r *Rerank) Query(query string) *Rerank { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query + + return r +} + +// Task settings for the individual inference request. +// These settings are specific to the task type you specified and override the +// task settings specified when initializing the service. 
+// API name: task_settings +func (r *Rerank) TaskSettings(tasksettings json.RawMessage) *Rerank { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/typedapi/inference/rerank/response.go b/typedapi/inference/rerank/response.go new file mode 100644 index 0000000000..3b5bd46b40 --- /dev/null +++ b/typedapi/inference/rerank/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package rerank + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package rerank +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/rerank/RerankResponse.ts#L22-L24 + +type Response []types.RankedDocument + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/inference/sparseembedding/request.go b/typedapi/inference/sparseembedding/request.go new file mode 100644 index 0000000000..5b1e63248c --- /dev/null +++ b/typedapi/inference/sparseembedding/request.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package sparseembedding + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package sparseembedding +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/sparse_embedding/SparseEmbeddingRequest.ts#L25-L63 +type Request struct { + + // Input Inference input. + // Either a string or an array of strings. + Input []string `json:"input"` + // TaskSettings Optional task settings + TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Sparseembedding request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + s.Input = append(s.Input, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + } + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + } + } + return nil +} diff --git 
a/typedapi/inference/sparseembedding/response.go b/typedapi/inference/sparseembedding/response.go new file mode 100644 index 0000000000..e47e545025 --- /dev/null +++ b/typedapi/inference/sparseembedding/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package sparseembedding + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package sparseembedding +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/sparse_embedding/SparseEmbeddingResponse.ts#L22-L24 + +type Response []types.SparseEmbeddingResult + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/inference/sparseembedding/sparse_embedding.go b/typedapi/inference/sparseembedding/sparse_embedding.go new file mode 100644 index 0000000000..78088254f1 --- /dev/null +++ b/typedapi/inference/sparseembedding/sparse_embedding.go @@ -0,0 +1,393 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Perform sparse embedding inference on the service +package sparseembedding + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SparseEmbedding struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSparseEmbedding type alias for index. +type NewSparseEmbedding func(inferenceid string) *SparseEmbedding + +// NewSparseEmbeddingFunc returns a new instance of SparseEmbedding with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewSparseEmbeddingFunc(tp elastictransport.Interface) NewSparseEmbedding { + return func(inferenceid string) *SparseEmbedding { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform sparse embedding inference on the service +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/post-inference-api.html +func New(tp elastictransport.Interface) *SparseEmbedding { + r := &SparseEmbedding{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SparseEmbedding) Raw(raw io.Reader) *SparseEmbedding { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SparseEmbedding) Request(req *Request) *SparseEmbedding { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *SparseEmbedding) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SparseEmbedding: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("sparse_embedding") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SparseEmbedding) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.sparse_embedding") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.sparse_embedding") + if reader := instrument.RecordRequestBody(ctx, "inference.sparse_embedding", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.sparse_embedding") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SparseEmbedding query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a sparseembedding.Response +func (r SparseEmbedding) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.sparse_embedding") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer 
res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SparseEmbedding headers map. +func (r *SparseEmbedding) Header(key, value string) *SparseEmbedding { + r.headers.Set(key, value) + + return r +} + +// InferenceId The inference Id +// API Name: inferenceid +func (r *SparseEmbedding) _inferenceid(inferenceid string) *SparseEmbedding { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference request to complete. +// API name: timeout +func (r *SparseEmbedding) Timeout(duration string) *SparseEmbedding { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SparseEmbedding) ErrorTrace(errortrace bool) *SparseEmbedding { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *SparseEmbedding) FilterPath(filterpaths ...string) *SparseEmbedding { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SparseEmbedding) Human(human bool) *SparseEmbedding { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SparseEmbedding) Pretty(pretty bool) *SparseEmbedding { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Inference input. +// Either a string or an array of strings. +// API name: input +func (r *SparseEmbedding) Input(inputs ...string) *SparseEmbedding { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Optional task settings +// API name: task_settings +func (r *SparseEmbedding) TaskSettings(tasksettings json.RawMessage) *SparseEmbedding { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/typedapi/inference/streamcompletion/request.go b/typedapi/inference/streamcompletion/request.go new file mode 100644 index 0000000000..d91d565057 --- /dev/null +++ b/typedapi/inference/streamcompletion/request.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package streamcompletion + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package streamcompletion +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/stream_completion/StreamInferenceRequest.ts#L24-L63 +type Request struct { + + // Input The text on which you want to perform the inference task. + // It can be a single string or an array. + // + // NOTE: Inference endpoints for the completion task type currently only support + // a single string as input. 
+ Input []string `json:"input"` + // TaskSettings Optional task settings + TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Streamcompletion request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + s.Input = append(s.Input, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + } + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + } + } + return nil +} diff --git a/typedapi/inference/streamcompletion/response.go b/typedapi/inference/streamcompletion/response.go new file mode 100644 index 0000000000..49d41b8e73 --- /dev/null +++ b/typedapi/inference/streamcompletion/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package streamcompletion + +// Response holds the response body struct for the package streamcompletion +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/stream_completion/StreamInferenceResponse.ts#L22-L24 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/typedapi/inference/streamcompletion/stream_completion.go b/typedapi/inference/streamcompletion/stream_completion.go new file mode 100644 index 0000000000..6001dd483b --- /dev/null +++ b/typedapi/inference/streamcompletion/stream_completion.go @@ -0,0 +1,422 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Perform streaming inference. +// Get real-time responses for completion tasks by delivering answers +// incrementally, reducing response times during computation. +// This API works only with the completion task type. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. For built-in models and models uploaded through +// Eland, the inference APIs offer an alternative way to use and manage trained +// models. However, if you do not plan to use the inference APIs to use these +// models or if you want to use non-NLP models, use the machine learning trained +// model APIs. +// +// This API requires the `monitor_inference` cluster privilege (the built-in +// `inference_admin` and `inference_user` roles grant this privilege). You must +// use a client that supports streaming. 
+package streamcompletion + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type StreamCompletion struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStreamCompletion type alias for index. +type NewStreamCompletion func(inferenceid string) *StreamCompletion + +// NewStreamCompletionFunc returns a new instance of StreamCompletion with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStreamCompletionFunc(tp elastictransport.Interface) NewStreamCompletion { + return func(inferenceid string) *StreamCompletion { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform streaming inference. +// Get real-time responses for completion tasks by delivering answers +// incrementally, reducing response times during computation. +// This API works only with the completion task type. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. For built-in models and models uploaded through +// Eland, the inference APIs offer an alternative way to use and manage trained +// models. 
However, if you do not plan to use the inference APIs to use these +// models or if you want to use non-NLP models, use the machine learning trained +// model APIs. +// +// This API requires the `monitor_inference` cluster privilege (the built-in +// `inference_admin` and `inference_user` roles grant this privilege). You must +// use a client that supports streaming. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/stream-inference-api.html +func New(tp elastictransport.Interface) *StreamCompletion { + r := &StreamCompletion{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *StreamCompletion) Raw(raw io.Reader) *StreamCompletion { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *StreamCompletion) Request(req *Request) *StreamCompletion { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *StreamCompletion) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for StreamCompletion: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("completion") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + path.WriteString("/") + path.WriteString("_stream") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "text/event-stream") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r StreamCompletion) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.stream_completion") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.stream_completion") + if reader := instrument.RecordRequestBody(ctx, "inference.stream_completion", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.stream_completion") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the StreamCompletion query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a streamcompletion.Response +func (r StreamCompletion) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.stream_completion") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + 
} + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the StreamCompletion headers map. +func (r *StreamCompletion) Header(key, value string) *StreamCompletion { + r.headers.Set(key, value) + + return r +} + +// InferenceId The unique identifier for the inference endpoint. +// API Name: inferenceid +func (r *StreamCompletion) _inferenceid(inferenceid string) *StreamCompletion { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *StreamCompletion) ErrorTrace(errortrace bool) *StreamCompletion { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *StreamCompletion) FilterPath(filterpaths ...string) *StreamCompletion { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *StreamCompletion) Human(human bool) *StreamCompletion { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *StreamCompletion) Pretty(pretty bool) *StreamCompletion { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The text on which you want to perform the inference task. +// It can be a single string or an array. +// +// NOTE: Inference endpoints for the completion task type currently only support +// a single string as input. 
+// API name: input +func (r *StreamCompletion) Input(inputs ...string) *StreamCompletion { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Optional task settings +// API name: task_settings +func (r *StreamCompletion) TaskSettings(tasksettings json.RawMessage) *StreamCompletion { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/typedapi/types/matrixaggregation.go b/typedapi/inference/textembedding/request.go similarity index 51% rename from typedapi/types/matrixaggregation.go rename to typedapi/inference/textembedding/request.go index 9410270395..6dca00339d 100644 --- a/typedapi/types/matrixaggregation.go +++ b/typedapi/inference/textembedding/request.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -package types +package textembedding import ( "bytes" @@ -28,19 +28,38 @@ import ( "io" ) -// MatrixAggregation type. +// Request holds the request body struct for the package textembedding // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/matrix.ts#L26-L36 -type MatrixAggregation struct { - // Fields An array of fields for computing the statistics. - Fields []string `json:"fields,omitempty"` - // Missing The value to apply to documents that do not have a value. - // By default, documents without a value are ignored. 
- Missing map[string]Float64 `json:"missing,omitempty"` +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/text_embedding/TextEmbeddingRequest.ts#L25-L63 +type Request struct { + + // Input Inference input. + // Either a string or an array of strings. + Input []string `json:"input"` + // TaskSettings Optional task settings + TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r } -func (s *MatrixAggregation) UnmarshalJSON(data []byte) error { +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Textembedding request: %w", err) + } + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -54,40 +73,28 @@ func (s *MatrixAggregation) UnmarshalJSON(data []byte) error { switch t { - case "fields": + case "input": rawMsg := json.RawMessage{} dec.Decode(&rawMsg) if !bytes.HasPrefix(rawMsg, []byte("[")) { o := new(string) if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Fields", err) + return fmt.Errorf("%s | %w", "Input", err) } - s.Fields = append(s.Fields, *o) + s.Input = append(s.Input, *o) } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { - return fmt.Errorf("%s | %w", "Fields", err) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) } } - case "missing": - if s.Missing == nil { - s.Missing = make(map[string]Float64, 0) - } - if err := dec.Decode(&s.Missing); err != nil { - return fmt.Errorf("%s | %w", "Missing", err) 
+ case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) } } } return nil } - -// NewMatrixAggregation returns a MatrixAggregation. -func NewMatrixAggregation() *MatrixAggregation { - r := &MatrixAggregation{ - Missing: make(map[string]Float64, 0), - } - - return r -} diff --git a/typedapi/inference/textembedding/response.go b/typedapi/inference/textembedding/response.go new file mode 100644 index 0000000000..4acf70751c --- /dev/null +++ b/typedapi/inference/textembedding/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package textembedding + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package textembedding +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/text_embedding/TextEmbeddingResponse.ts#L22-L24 +type Response struct { + AdditionalTextEmbeddingInferenceResultProperty map[string]json.RawMessage `json:"-"` + TextEmbedding []types.TextEmbeddingResult `json:"text_embedding,omitempty"` + TextEmbeddingBytes []types.TextEmbeddingByteResult `json:"text_embedding_bytes,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + AdditionalTextEmbeddingInferenceResultProperty: make(map[string]json.RawMessage, 0), + } + return r +} diff --git a/typedapi/inference/inference/inference.go b/typedapi/inference/textembedding/text_embedding.go similarity index 71% rename from typedapi/inference/inference/inference.go rename to typedapi/inference/textembedding/text_embedding.go index af27b4e0fd..35b6b0b28a 100644 --- a/typedapi/inference/inference/inference.go +++ b/typedapi/inference/textembedding/text_embedding.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Perform inference on the service -package inference +// Perform text embedding inference on the service +package textembedding import ( gobytes "bytes" @@ -38,15 +38,13 @@ import ( ) const ( - tasktypeMask = iota + 1 - - inferenceidMask + inferenceidMask = iota + 1 ) // ErrBuildPath is returned in case of missing parameters within the build of the request. var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") -type Inference struct { +type TextEmbedding struct { transport elastictransport.Interface headers http.Header @@ -61,7 +59,6 @@ type Inference struct { paramSet int - tasktype string inferenceid string spanStarted bool @@ -69,13 +66,13 @@ type Inference struct { instrument elastictransport.Instrumentation } -// NewInference type alias for index. -type NewInference func(inferenceid string) *Inference +// NewTextEmbedding type alias for index. +type NewTextEmbedding func(inferenceid string) *TextEmbedding -// NewInferenceFunc returns a new instance of Inference with the provided transport. +// NewTextEmbeddingFunc returns a new instance of TextEmbedding with the provided transport. // Used in the index of the library this allows to retrieve every apis in once place. 
-func NewInferenceFunc(tp elastictransport.Interface) NewInference { - return func(inferenceid string) *Inference { +func NewTextEmbeddingFunc(tp elastictransport.Interface) NewTextEmbedding { + return func(inferenceid string) *TextEmbedding { n := New(tp) n._inferenceid(inferenceid) @@ -84,18 +81,16 @@ func NewInferenceFunc(tp elastictransport.Interface) NewInference { } } -// Perform inference on the service +// Perform text embedding inference on the service // // https://www.elastic.co/guide/en/elasticsearch/reference/current/post-inference-api.html -func New(tp elastictransport.Interface) *Inference { - r := &Inference{ +func New(tp elastictransport.Interface) *TextEmbedding { + r := &TextEmbedding{ transport: tp, values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -109,14 +104,14 @@ func New(tp elastictransport.Interface) *Inference { // Raw takes a json payload as input which is then passed to the http.Request // If specified Raw takes precedence on Request method. -func (r *Inference) Raw(raw io.Reader) *Inference { +func (r *TextEmbedding) Raw(raw io.Reader) *TextEmbedding { r.raw = raw return r } // Request allows to set the request property with the appropriate payload. -func (r *Inference) Request(req *Request) *Inference { +func (r *TextEmbedding) Request(req *Request) *TextEmbedding { r.req = req return r @@ -124,7 +119,7 @@ func (r *Inference) Request(req *Request) *Inference { // HttpRequest returns the http.Request object built from the // given parameters. 
-func (r *Inference) HttpRequest(ctx context.Context) (*http.Request, error) { +func (r *TextEmbedding) HttpRequest(ctx context.Context) (*http.Request, error) { var path strings.Builder var method string var req *http.Request @@ -145,7 +140,7 @@ func (r *Inference) HttpRequest(ctx context.Context) (*http.Request, error) { data, err := json.Marshal(r.req) if err != nil { - return nil, fmt.Errorf("could not serialise request for Inference: %w", err) + return nil, fmt.Errorf("could not serialise request for TextEmbedding: %w", err) } r.buf.Write(data) @@ -163,22 +158,7 @@ func (r *Inference) HttpRequest(ctx context.Context) (*http.Request, error) { path.WriteString("/") path.WriteString("_inference") path.WriteString("/") - - if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) - } - path.WriteString(r.inferenceid) - - method = http.MethodPost - case r.paramSet == tasktypeMask|inferenceidMask: - path.WriteString("/") - path.WriteString("_inference") - path.WriteString("/") - - if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.RecordPathPart(ctx, "tasktype", r.tasktype) - } - path.WriteString(r.tasktype) + path.WriteString("text_embedding") path.WriteString("/") if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -222,11 +202,11 @@ func (r *Inference) HttpRequest(ctx context.Context) (*http.Request, error) { } // Perform runs the http.Request through the provided transport and returns an http.Response. 
-func (r Inference) Perform(providedCtx context.Context) (*http.Response, error) { +func (r TextEmbedding) Perform(providedCtx context.Context) (*http.Response, error) { var ctx context.Context if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { if r.spanStarted == false { - ctx := instrument.Start(providedCtx, "inference.inference") + ctx := instrument.Start(providedCtx, "inference.text_embedding") defer instrument.Close(ctx) } } @@ -243,17 +223,17 @@ func (r Inference) Perform(providedCtx context.Context) (*http.Response, error) } if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.BeforeRequest(req, "inference.inference") - if reader := instrument.RecordRequestBody(ctx, "inference.inference", r.raw); reader != nil { + instrument.BeforeRequest(req, "inference.text_embedding") + if reader := instrument.RecordRequestBody(ctx, "inference.text_embedding", r.raw); reader != nil { req.Body = reader } } res, err := r.transport.Perform(req) if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.AfterRequest(req, "elasticsearch", "inference.inference") + instrument.AfterRequest(req, "elasticsearch", "inference.text_embedding") } if err != nil { - localErr := fmt.Errorf("an error happened during the Inference query execution: %w", err) + localErr := fmt.Errorf("an error happened during the TextEmbedding query execution: %w", err) if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, localErr) } @@ -263,12 +243,12 @@ func (r Inference) Perform(providedCtx context.Context) (*http.Response, error) return res, nil } -// Do runs the request through the transport, handle the response and returns a inference.Response -func (r Inference) Do(providedCtx context.Context) (*Response, error) { +// Do runs the request through the transport, handle the response and returns a textembedding.Response +func (r TextEmbedding) Do(providedCtx 
context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - ctx = instrument.Start(providedCtx, "inference.inference") + ctx = instrument.Start(providedCtx, "inference.text_embedding") defer instrument.Close(ctx) } if ctx == nil { @@ -317,25 +297,16 @@ func (r Inference) Do(providedCtx context.Context) (*Response, error) { return nil, errorResponse } -// Header set a key, value pair in the Inference headers map. -func (r *Inference) Header(key, value string) *Inference { +// Header set a key, value pair in the TextEmbedding headers map. +func (r *TextEmbedding) Header(key, value string) *TextEmbedding { r.headers.Set(key, value) return r } -// TaskType The task type -// API Name: tasktype -func (r *Inference) TaskType(tasktype string) *Inference { - r.paramSet |= tasktypeMask - r.tasktype = tasktype - - return r -} - // InferenceId The inference Id // API Name: inferenceid -func (r *Inference) _inferenceid(inferenceid string) *Inference { +func (r *TextEmbedding) _inferenceid(inferenceid string) *TextEmbedding { r.paramSet |= inferenceidMask r.inferenceid = inferenceid @@ -344,7 +315,7 @@ func (r *Inference) _inferenceid(inferenceid string) *Inference { // Timeout Specifies the amount of time to wait for the inference request to complete. // API name: timeout -func (r *Inference) Timeout(duration string) *Inference { +func (r *TextEmbedding) Timeout(duration string) *TextEmbedding { r.values.Set("timeout", duration) return r @@ -353,7 +324,7 @@ func (r *Inference) Timeout(duration string) *Inference { // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. 
// API name: error_trace -func (r *Inference) ErrorTrace(errortrace bool) *Inference { +func (r *TextEmbedding) ErrorTrace(errortrace bool) *TextEmbedding { r.values.Set("error_trace", strconv.FormatBool(errortrace)) return r @@ -362,7 +333,7 @@ func (r *Inference) ErrorTrace(errortrace bool) *Inference { // FilterPath Comma-separated list of filters in dot notation which reduce the response // returned by Elasticsearch. // API name: filter_path -func (r *Inference) FilterPath(filterpaths ...string) *Inference { +func (r *TextEmbedding) FilterPath(filterpaths ...string) *TextEmbedding { tmp := []string{} for _, item := range filterpaths { tmp = append(tmp, fmt.Sprintf("%v", item)) @@ -379,7 +350,7 @@ func (r *Inference) FilterPath(filterpaths ...string) *Inference { // consumed // only by machines. // API name: human -func (r *Inference) Human(human bool) *Inference { +func (r *TextEmbedding) Human(human bool) *TextEmbedding { r.values.Set("human", strconv.FormatBool(human)) return r @@ -388,34 +359,34 @@ func (r *Inference) Human(human bool) *Inference { // Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use // this option for debugging only. // API name: pretty -func (r *Inference) Pretty(pretty bool) *Inference { +func (r *TextEmbedding) Pretty(pretty bool) *TextEmbedding { r.values.Set("pretty", strconv.FormatBool(pretty)) return r } -// Input Inference input. +// Inference input. // Either a string or an array of strings. // API name: input -func (r *Inference) Input(inputs ...string) *Inference { +func (r *TextEmbedding) Input(inputs ...string) *TextEmbedding { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) r.req.Input = inputs return r } -// Query Query input, required for rerank task. -// Not required for other tasks. 
-// API name: query -func (r *Inference) Query(query string) *Inference { - - r.req.Query = &query - - return r -} - -// TaskSettings Optional task settings +// Optional task settings // API name: task_settings -func (r *Inference) TaskSettings(tasksettings json.RawMessage) *Inference { +func (r *TextEmbedding) TaskSettings(tasksettings json.RawMessage) *TextEmbedding { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TaskSettings = tasksettings return r diff --git a/typedapi/inference/update/request.go b/typedapi/inference/update/request.go new file mode 100644 index 0000000000..1876f3e07a --- /dev/null +++ b/typedapi/inference/update/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package update + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package update +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/update/UpdateInferenceRequest.ts#L25-L61 +type Request = types.InferenceEndpoint + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewInferenceEndpoint() + + return r +} diff --git a/typedapi/inference/update/response.go b/typedapi/inference/update/response.go new file mode 100644 index 0000000000..fb5a8c305c --- /dev/null +++ b/typedapi/inference/update/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package update + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tasktype" +) + +// Response holds the response body struct for the package update +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/update/UpdateInferenceResponse.ts#L22-L24 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktype.TaskType `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/inference/update/update.go b/typedapi/inference/update/update.go new file mode 100644 index 0000000000..36e99fbfec --- /dev/null +++ b/typedapi/inference/update/update.go @@ -0,0 +1,469 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Update an inference endpoint. +// +// Modify `task_settings`, secrets (within `service_settings`), or +// `num_allocations` for an inference endpoint, depending on the specific +// endpoint service and `task_type`. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. +// For built-in models and models uploaded through Eland, the inference APIs +// offer an alternative way to use and manage trained models. +// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. +package update + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 + + tasktypeMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Update struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + tasktype string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdate type alias for index. +type NewUpdate func(inferenceid string) *Update + +// NewUpdateFunc returns a new instance of Update with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateFunc(tp elastictransport.Interface) NewUpdate { + return func(inferenceid string) *Update { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Update an inference endpoint. +// +// Modify `task_settings`, secrets (within `service_settings`), or +// `num_allocations` for an inference endpoint, depending on the specific +// endpoint service and `task_type`. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. +// For built-in models and models uploaded through Eland, the inference APIs +// offer an alternative way to use and manage trained models. +// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/update-inference-api.html +func New(tp elastictransport.Interface) *Update { + r := &Update{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Update) Raw(raw io.Reader) *Update { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Update) Request(req *Request) *Update { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Update) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Update: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + path.WriteString("/") + path.WriteString("_update") + + method = http.MethodPut + case r.paramSet == tasktypeMask|inferenceidMask: + 
path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + path.WriteString("/") + path.WriteString("_update") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Update) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.update") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.update") + if reader := instrument.RecordRequestBody(ctx, "inference.update", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.update") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Update query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a update.Response +func (r Update) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.update") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = 
json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Update headers map. +func (r *Update) Header(key, value string) *Update { + r.headers.Set(key, value) + + return r +} + +// InferenceId The unique identifier of the inference endpoint. +// API Name: inferenceid +func (r *Update) _inferenceid(inferenceid string) *Update { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// TaskType The type of inference task that the model performs. +// API Name: tasktype +func (r *Update) TaskType(tasktype string) *Update { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Update) ErrorTrace(errortrace bool) *Update { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *Update) FilterPath(filterpaths ...string) *Update { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Update) Human(human bool) *Update { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Update) Pretty(pretty bool) *Update { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Chunking configuration object +// API name: chunking_settings +func (r *Update) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The service type +// API name: service +func (r *Update) Service(service string) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Service = service + + return r +} + +// Settings specific to the service +// API name: service_settings +func (r *Update) ServiceSettings(servicesettings json.RawMessage) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = servicesettings + + return r +} + +// Task settings specific to the service and task type +// API name: 
task_settings +func (r *Update) TaskSettings(tasksettings json.RawMessage) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/typedapi/ingest/deletegeoipdatabase/delete_geoip_database.go b/typedapi/ingest/deletegeoipdatabase/delete_geoip_database.go new file mode 100644 index 0000000000..847da67786 --- /dev/null +++ b/typedapi/ingest/deletegeoipdatabase/delete_geoip_database.go @@ -0,0 +1,369 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Delete GeoIP database configurations. +// +// Delete one or more IP geolocation database configurations. 
+package deletegeoipdatabase + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteGeoipDatabase struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteGeoipDatabase type alias for index. +type NewDeleteGeoipDatabase func(id string) *DeleteGeoipDatabase + +// NewDeleteGeoipDatabaseFunc returns a new instance of DeleteGeoipDatabase with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteGeoipDatabaseFunc(tp elastictransport.Interface) NewDeleteGeoipDatabase { + return func(id string) *DeleteGeoipDatabase { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete GeoIP database configurations. +// +// Delete one or more IP geolocation database configurations. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-delete-geoip-database +func New(tp elastictransport.Interface) *DeleteGeoipDatabase { + r := &DeleteGeoipDatabase{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *DeleteGeoipDatabase) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("geoip") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteGeoipDatabase) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.delete_geoip_database") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.delete_geoip_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.delete_geoip_database", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.delete_geoip_database") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteGeoipDatabase query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletegeoipdatabase.Response +func (r DeleteGeoipDatabase) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_geoip_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + 
return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteGeoipDatabase) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_geoip_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteGeoipDatabase query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteGeoipDatabase headers map. 
+func (r *DeleteGeoipDatabase) Header(key, value string) *DeleteGeoipDatabase { + r.headers.Set(key, value) + + return r +} + +// Id A comma-separated list of geoip database configurations to delete +// API Name: id +func (r *DeleteGeoipDatabase) _id(id string) *DeleteGeoipDatabase { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *DeleteGeoipDatabase) MasterTimeout(duration string) *DeleteGeoipDatabase { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. +// API name: timeout +func (r *DeleteGeoipDatabase) Timeout(duration string) *DeleteGeoipDatabase { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteGeoipDatabase) ErrorTrace(errortrace bool) *DeleteGeoipDatabase { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteGeoipDatabase) FilterPath(filterpaths ...string) *DeleteGeoipDatabase { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. 
This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteGeoipDatabase) Human(human bool) *DeleteGeoipDatabase { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteGeoipDatabase) Pretty(pretty bool) *DeleteGeoipDatabase { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/ingest/deletegeoipdatabase/response.go b/typedapi/ingest/deletegeoipdatabase/response.go new file mode 100644 index 0000000000..be897e6ad1 --- /dev/null +++ b/typedapi/ingest/deletegeoipdatabase/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package deletegeoipdatabase + +// Response holds the response body struct for the package deletegeoipdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/delete_geoip_database/DeleteGeoipDatabaseResponse.ts#L22-L24 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/ingest/deleteiplocationdatabase/delete_ip_location_database.go b/typedapi/ingest/deleteiplocationdatabase/delete_ip_location_database.go new file mode 100644 index 0000000000..da7772f712 --- /dev/null +++ b/typedapi/ingest/deleteiplocationdatabase/delete_ip_location_database.go @@ -0,0 +1,368 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Delete IP geolocation database configurations. +package deleteiplocationdatabase + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteIpLocationDatabase struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteIpLocationDatabase type alias for index. +type NewDeleteIpLocationDatabase func(id string) *DeleteIpLocationDatabase + +// NewDeleteIpLocationDatabaseFunc returns a new instance of DeleteIpLocationDatabase with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteIpLocationDatabaseFunc(tp elastictransport.Interface) NewDeleteIpLocationDatabase { + return func(id string) *DeleteIpLocationDatabase { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete IP geolocation database configurations. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-ip-location-database-api.html +func New(tp elastictransport.Interface) *DeleteIpLocationDatabase { + r := &DeleteIpLocationDatabase{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteIpLocationDatabase) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteIpLocationDatabase) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.delete_ip_location_database") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.delete_ip_location_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.delete_ip_location_database", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.delete_ip_location_database") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteIpLocationDatabase query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleteiplocationdatabase.Response +func (r DeleteIpLocationDatabase) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r DeleteIpLocationDatabase) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteIpLocationDatabase query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteIpLocationDatabase headers map. +func (r *DeleteIpLocationDatabase) Header(key, value string) *DeleteIpLocationDatabase { + r.headers.Set(key, value) + + return r +} + +// Id A comma-separated list of IP location database configurations. +// API Name: id +func (r *DeleteIpLocationDatabase) _id(id string) *DeleteIpLocationDatabase { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// A value of `-1` indicates that the request should never time out. +// API name: master_timeout +func (r *DeleteIpLocationDatabase) MasterTimeout(duration string) *DeleteIpLocationDatabase { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
+// A value of `-1` indicates that the request should never time out. +// API name: timeout +func (r *DeleteIpLocationDatabase) Timeout(duration string) *DeleteIpLocationDatabase { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteIpLocationDatabase) ErrorTrace(errortrace bool) *DeleteIpLocationDatabase { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteIpLocationDatabase) FilterPath(filterpaths ...string) *DeleteIpLocationDatabase { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteIpLocationDatabase) Human(human bool) *DeleteIpLocationDatabase { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeleteIpLocationDatabase) Pretty(pretty bool) *DeleteIpLocationDatabase { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/ingest/deleteiplocationdatabase/response.go b/typedapi/ingest/deleteiplocationdatabase/response.go new file mode 100644 index 0000000000..903ef55d52 --- /dev/null +++ b/typedapi/ingest/deleteiplocationdatabase/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package deleteiplocationdatabase + +// Response holds the response body struct for the package deleteiplocationdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/delete_ip_location_database/DeleteIpLocationDatabaseResponse.ts#L22-L24 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/ingest/deletepipeline/delete_pipeline.go b/typedapi/ingest/deletepipeline/delete_pipeline.go index 412bc46de1..9fdaec55e0 100644 --- a/typedapi/ingest/deletepipeline/delete_pipeline.go +++ b/typedapi/ingest/deletepipeline/delete_pipeline.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes one or more existing ingest pipeline. +// Delete pipelines. +// Delete one or more ingest pipelines. package deletepipeline import ( @@ -76,7 +77,8 @@ func NewDeletePipelineFunc(tp elastictransport.Interface) NewDeletePipeline { } } -// Deletes one or more existing ingest pipeline. +// Delete pipelines. +// Delete one or more ingest pipelines. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-pipeline-api.html func New(tp elastictransport.Interface) *DeletePipeline { diff --git a/typedapi/ingest/deletepipeline/response.go b/typedapi/ingest/deletepipeline/response.go index 1465f59627..b71c3135a4 100644 --- a/typedapi/ingest/deletepipeline/response.go +++ b/typedapi/ingest/deletepipeline/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletepipeline // Response holds the response body struct for the package deletepipeline // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/delete_pipeline/DeletePipelineResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/delete_pipeline/DeletePipelineResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ingest/geoipstats/geo_ip_stats.go b/typedapi/ingest/geoipstats/geo_ip_stats.go index 52999cb393..727090a064 100644 --- a/typedapi/ingest/geoipstats/geo_ip_stats.go +++ b/typedapi/ingest/geoipstats/geo_ip_stats.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Gets download statistics for GeoIP2 databases used with the geoip processor. +// Get GeoIP statistics. +// Get download statistics for GeoIP2 databases that are used with the GeoIP +// processor. package geoipstats import ( @@ -68,7 +70,9 @@ func NewGeoIpStatsFunc(tp elastictransport.Interface) NewGeoIpStats { } } -// Gets download statistics for GeoIP2 databases used with the geoip processor. +// Get GeoIP statistics. +// Get download statistics for GeoIP2 databases that are used with the GeoIP +// processor. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html func New(tp elastictransport.Interface) *GeoIpStats { diff --git a/typedapi/ingest/geoipstats/response.go b/typedapi/ingest/geoipstats/response.go index 635edbd54b..6f953e883b 100644 --- a/typedapi/ingest/geoipstats/response.go +++ b/typedapi/ingest/geoipstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package geoipstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package geoipstats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/geo_ip_stats/IngestGeoIpStatsResponse.ts#L24-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/geo_ip_stats/IngestGeoIpStatsResponse.ts#L24-L31 type Response struct { // Nodes Downloaded GeoIP2 databases for each node. diff --git a/typedapi/ingest/getgeoipdatabase/get_geoip_database.go b/typedapi/ingest/getgeoipdatabase/get_geoip_database.go new file mode 100644 index 0000000000..c7417a7b84 --- /dev/null +++ b/typedapi/ingest/getgeoipdatabase/get_geoip_database.go @@ -0,0 +1,359 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Get GeoIP database configurations. +// +// Get information about one or more IP geolocation database configurations. +package getgeoipdatabase + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetGeoipDatabase struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetGeoipDatabase type alias for index. +type NewGetGeoipDatabase func() *GetGeoipDatabase + +// NewGetGeoipDatabaseFunc returns a new instance of GetGeoipDatabase with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetGeoipDatabaseFunc(tp elastictransport.Interface) NewGetGeoipDatabase { + return func() *GetGeoipDatabase { + n := New(tp) + + return n + } +} + +// Get GeoIP database configurations. 
+// +// Get information about one or more IP geolocation database configurations. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-get-geoip-database +func New(tp elastictransport.Interface) *GetGeoipDatabase { + r := &GetGeoipDatabase{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetGeoipDatabase) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("geoip") + path.WriteString("/") + path.WriteString("database") + + method = http.MethodGet + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("geoip") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not 
build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetGeoipDatabase) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.get_geoip_database") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.get_geoip_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.get_geoip_database", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.get_geoip_database") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetGeoipDatabase query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getgeoipdatabase.Response +func (r GetGeoipDatabase) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.get_geoip_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err 
!= nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetGeoipDatabase) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.get_geoip_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetGeoipDatabase query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetGeoipDatabase headers map. +func (r *GetGeoipDatabase) Header(key, value string) *GetGeoipDatabase { + r.headers.Set(key, value) + + return r +} + +// Id A comma-separated list of database configuration IDs to retrieve. +// Wildcard (`*`) expressions are supported. +// To get all database configurations, omit this parameter or use `*`. +// API Name: id +func (r *GetGeoipDatabase) Id(id string) *GetGeoipDatabase { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetGeoipDatabase) ErrorTrace(errortrace bool) *GetGeoipDatabase { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *GetGeoipDatabase) FilterPath(filterpaths ...string) *GetGeoipDatabase { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetGeoipDatabase) Human(human bool) *GetGeoipDatabase { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetGeoipDatabase) Pretty(pretty bool) *GetGeoipDatabase { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/inference/inference/response.go b/typedapi/ingest/getgeoipdatabase/response.go similarity index 57% rename from typedapi/inference/inference/response.go rename to typedapi/ingest/getgeoipdatabase/response.go index 9b9b1b1434..e409080056 100644 --- a/typedapi/inference/inference/response.go +++ b/typedapi/ingest/getgeoipdatabase/response.go @@ -16,23 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -package inference +package getgeoipdatabase import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) -// Response holds the response body struct for the package inference +// Response holds the response body struct for the package getgeoipdatabase // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/inference/InferenceResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/get_geoip_database/GetGeoipDatabaseResponse.ts#L25-L27 type Response struct { - Completion []types.CompletionResult `json:"completion,omitempty"` - Rerank []types.RankedDocument `json:"rerank,omitempty"` - SparseEmbedding []types.SparseEmbeddingResult `json:"sparse_embedding,omitempty"` - TextEmbedding []types.TextEmbeddingResult `json:"text_embedding,omitempty"` - TextEmbeddingBytes []types.TextEmbeddingByteResult `json:"text_embedding_bytes,omitempty"` + Databases []types.GeoipDatabaseConfigurationMetadata `json:"databases"` } // NewResponse returns a Response diff --git a/typedapi/ingest/getiplocationdatabase/get_ip_location_database.go b/typedapi/ingest/getiplocationdatabase/get_ip_location_database.go new file mode 100644 index 0000000000..6acdb9f904 --- /dev/null +++ b/typedapi/ingest/getiplocationdatabase/get_ip_location_database.go @@ -0,0 +1,366 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Get IP geolocation database configurations. +package getiplocationdatabase + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetIpLocationDatabase struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetIpLocationDatabase type alias for index. +type NewGetIpLocationDatabase func() *GetIpLocationDatabase + +// NewGetIpLocationDatabaseFunc returns a new instance of GetIpLocationDatabase with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewGetIpLocationDatabaseFunc(tp elastictransport.Interface) NewGetIpLocationDatabase { + return func() *GetIpLocationDatabase { + n := New(tp) + + return n + } +} + +// Get IP geolocation database configurations. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ip-location-database-api.html +func New(tp elastictransport.Interface) *GetIpLocationDatabase { + r := &GetIpLocationDatabase{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetIpLocationDatabase) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + + method = http.MethodGet + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if 
req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetIpLocationDatabase) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.get_ip_location_database") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.get_ip_location_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.get_ip_location_database", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.get_ip_location_database") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetIpLocationDatabase query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getiplocationdatabase.Response +func (r GetIpLocationDatabase) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.get_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetIpLocationDatabase) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.get_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetIpLocationDatabase query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetIpLocationDatabase headers map. +func (r *GetIpLocationDatabase) Header(key, value string) *GetIpLocationDatabase { + r.headers.Set(key, value) + + return r +} + +// Id Comma-separated list of database configuration IDs to retrieve. +// Wildcard (`*`) expressions are supported. +// To get all database configurations, omit this parameter or use `*`. +// API Name: id +func (r *GetIpLocationDatabase) Id(id string) *GetIpLocationDatabase { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// A value of `-1` indicates that the request should never time out. 
+// API name: master_timeout +func (r *GetIpLocationDatabase) MasterTimeout(duration string) *GetIpLocationDatabase { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetIpLocationDatabase) ErrorTrace(errortrace bool) *GetIpLocationDatabase { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetIpLocationDatabase) FilterPath(filterpaths ...string) *GetIpLocationDatabase { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetIpLocationDatabase) Human(human bool) *GetIpLocationDatabase { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetIpLocationDatabase) Pretty(pretty bool) *GetIpLocationDatabase { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/ingest/getiplocationdatabase/response.go b/typedapi/ingest/getiplocationdatabase/response.go new file mode 100644 index 0000000000..19e9055164 --- /dev/null +++ b/typedapi/ingest/getiplocationdatabase/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package getiplocationdatabase + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package getiplocationdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/get_ip_location_database/GetIpLocationDatabaseResponse.ts#L24-L26 +type Response struct { + Databases []types.IpLocationDatabaseConfigurationMetadata `json:"databases"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/ingest/getpipeline/get_pipeline.go b/typedapi/ingest/getpipeline/get_pipeline.go index 517159f9b0..2c024b3de5 100644 --- a/typedapi/ingest/getpipeline/get_pipeline.go +++ b/typedapi/ingest/getpipeline/get_pipeline.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about one or more ingest pipelines. +// Get pipelines. +// +// Get information about one or more ingest pipelines. // This API returns a local reference of the pipeline. package getpipeline @@ -75,7 +77,9 @@ func NewGetPipelineFunc(tp elastictransport.Interface) NewGetPipeline { } } -// Returns information about one or more ingest pipelines. +// Get pipelines. +// +// Get information about one or more ingest pipelines. // This API returns a local reference of the pipeline. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-pipeline-api.html diff --git a/typedapi/ingest/getpipeline/response.go b/typedapi/ingest/getpipeline/response.go index 4398fb4529..109859c29d 100644 --- a/typedapi/ingest/getpipeline/response.go +++ b/typedapi/ingest/getpipeline/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getpipeline @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/get_pipeline/GetPipelineResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/get_pipeline/GetPipelineResponse.ts#L23-L26 type Response map[string]types.IngestPipeline diff --git a/typedapi/ingest/processorgrok/processor_grok.go b/typedapi/ingest/processorgrok/processor_grok.go index ba28bfd9cd..7403ec6d5d 100644 --- a/typedapi/ingest/processorgrok/processor_grok.go +++ b/typedapi/ingest/processorgrok/processor_grok.go @@ -16,11 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Extracts structured fields out of a single text field within a document. -// You choose which field to extract matched fields from, as well as the grok -// pattern you expect will match. +// Run a grok processor. +// Extract structured fields out of a single text field within a document. +// You must choose which field to extract matched fields from, as well as the +// grok pattern you expect will match. // A grok pattern is like a regular expression that supports aliased expressions // that can be reused. 
package processorgrok @@ -72,9 +73,10 @@ func NewProcessorGrokFunc(tp elastictransport.Interface) NewProcessorGrok { } } -// Extracts structured fields out of a single text field within a document. -// You choose which field to extract matched fields from, as well as the grok -// pattern you expect will match. +// Run a grok processor. +// Extract structured fields out of a single text field within a document. +// You must choose which field to extract matched fields from, as well as the +// grok pattern you expect will match. // A grok pattern is like a regular expression that supports aliased expressions // that can be reused. // diff --git a/typedapi/ingest/processorgrok/response.go b/typedapi/ingest/processorgrok/response.go index a6863db25b..5416762da5 100644 --- a/typedapi/ingest/processorgrok/response.go +++ b/typedapi/ingest/processorgrok/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package processorgrok // Response holds the response body struct for the package processorgrok // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/processor_grok/GrokProcessorPatternsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/processor_grok/GrokProcessorPatternsResponse.ts#L22-L24 type Response struct { Patterns map[string]string `json:"patterns"` } diff --git a/typedapi/ingest/putgeoipdatabase/put_geoip_database.go b/typedapi/ingest/putgeoipdatabase/put_geoip_database.go new file mode 100644 index 0000000000..ff309ee5da --- /dev/null +++ b/typedapi/ingest/putgeoipdatabase/put_geoip_database.go @@ -0,0 +1,413 @@ +// 
Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Create or update a GeoIP database configuration. +// +// Refer to the create or update IP geolocation database configuration API. +package putgeoipdatabase + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutGeoipDatabase struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutGeoipDatabase type alias for index. 
+type NewPutGeoipDatabase func(id string) *PutGeoipDatabase + +// NewPutGeoipDatabaseFunc returns a new instance of PutGeoipDatabase with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutGeoipDatabaseFunc(tp elastictransport.Interface) NewPutGeoipDatabase { + return func(id string) *PutGeoipDatabase { + n := New(tp) + + n._id(id) + + return n + } +} + +// Create or update a GeoIP database configuration. +// +// Refer to the create or update IP geolocation database configuration API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-put-geoip-database +func New(tp elastictransport.Interface) *PutGeoipDatabase { + r := &PutGeoipDatabase{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutGeoipDatabase) Raw(raw io.Reader) *PutGeoipDatabase { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutGeoipDatabase) Request(req *Request) *PutGeoipDatabase { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutGeoipDatabase) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutGeoipDatabase: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("geoip") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutGeoipDatabase) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.put_geoip_database") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.put_geoip_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.put_geoip_database", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.put_geoip_database") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutGeoipDatabase query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putgeoipdatabase.Response +func (r PutGeoipDatabase) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.put_geoip_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer 
res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutGeoipDatabase headers map. +func (r *PutGeoipDatabase) Header(key, value string) *PutGeoipDatabase { + r.headers.Set(key, value) + + return r +} + +// Id ID of the database configuration to create or update. +// API Name: id +func (r *PutGeoipDatabase) _id(id string) *PutGeoipDatabase { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *PutGeoipDatabase) MasterTimeout(duration string) *PutGeoipDatabase { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *PutGeoipDatabase) Timeout(duration string) *PutGeoipDatabase { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *PutGeoipDatabase) ErrorTrace(errortrace bool) *PutGeoipDatabase { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutGeoipDatabase) FilterPath(filterpaths ...string) *PutGeoipDatabase { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutGeoipDatabase) Human(human bool) *PutGeoipDatabase { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutGeoipDatabase) Pretty(pretty bool) *PutGeoipDatabase { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The configuration necessary to identify which IP geolocation provider to use +// to download the database, as well as any provider-specific configuration +// necessary for such downloading. +// At present, the only supported provider is maxmind, and the maxmind provider +// requires that an account_id (string) is configured. 
+// API name: maxmind +func (r *PutGeoipDatabase) Maxmind(maxmind types.MaxmindVariant) *PutGeoipDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Maxmind = *maxmind.MaxmindCaster() + + return r +} + +// The provider-assigned name of the IP geolocation database to download. +// API name: name +func (r *PutGeoipDatabase) Name(name string) *PutGeoipDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = name + + return r +} diff --git a/typedapi/ingest/putgeoipdatabase/request.go b/typedapi/ingest/putgeoipdatabase/request.go new file mode 100644 index 0000000000..27b220bcd2 --- /dev/null +++ b/typedapi/ingest/putgeoipdatabase/request.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package putgeoipdatabase + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package putgeoipdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/put_geoip_database/PutGeoipDatabaseRequest.ts#L25-L66 +type Request struct { + + // Maxmind The configuration necessary to identify which IP geolocation provider to use + // to download the database, as well as any provider-specific configuration + // necessary for such downloading. + // At present, the only supported provider is maxmind, and the maxmind provider + // requires that an account_id (string) is configured. + Maxmind types.Maxmind `json:"maxmind"` + // Name The provider-assigned name of the IP geolocation database to download. 
+ Name string `json:"name"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putgeoipdatabase request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "maxmind": + if err := dec.Decode(&s.Maxmind); err != nil { + return fmt.Errorf("%s | %w", "Maxmind", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} diff --git a/typedapi/ingest/putgeoipdatabase/response.go b/typedapi/ingest/putgeoipdatabase/response.go new file mode 100644 index 0000000000..a66cc6fc8e --- /dev/null +++ b/typedapi/ingest/putgeoipdatabase/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package putgeoipdatabase + +// Response holds the response body struct for the package putgeoipdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/put_geoip_database/PutGeoipDatabaseResponse.ts#L22-L24 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/ingest/putiplocationdatabase/put_ip_location_database.go b/typedapi/ingest/putiplocationdatabase/put_ip_location_database.go new file mode 100644 index 0000000000..1a5de9a8fb --- /dev/null +++ b/typedapi/ingest/putiplocationdatabase/put_ip_location_database.go @@ -0,0 +1,438 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Create or update an IP geolocation database configuration. +package putiplocationdatabase + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutIpLocationDatabase struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutIpLocationDatabase type alias for index. +type NewPutIpLocationDatabase func(id string) *PutIpLocationDatabase + +// NewPutIpLocationDatabaseFunc returns a new instance of PutIpLocationDatabase with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutIpLocationDatabaseFunc(tp elastictransport.Interface) NewPutIpLocationDatabase { + return func(id string) *PutIpLocationDatabase { + n := New(tp) + + n._id(id) + + return n + } +} + +// Create or update an IP geolocation database configuration. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-ip-location-database-api.html +func New(tp elastictransport.Interface) *PutIpLocationDatabase { + r := &PutIpLocationDatabase{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutIpLocationDatabase) Raw(raw io.Reader) *PutIpLocationDatabase { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutIpLocationDatabase) Request(req *Request) *PutIpLocationDatabase { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutIpLocationDatabase) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutIpLocationDatabase: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutIpLocationDatabase) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.put_ip_location_database") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.put_ip_location_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.put_ip_location_database", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.put_ip_location_database") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutIpLocationDatabase query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putiplocationdatabase.Response +func (r PutIpLocationDatabase) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.put_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutIpLocationDatabase headers map. +func (r *PutIpLocationDatabase) Header(key, value string) *PutIpLocationDatabase { + r.headers.Set(key, value) + + return r +} + +// Id The database configuration identifier. +// API Name: id +func (r *PutIpLocationDatabase) _id(id string) *PutIpLocationDatabase { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// A value of `-1` indicates that the request should never time out. +// API name: master_timeout +func (r *PutIpLocationDatabase) MasterTimeout(duration string) *PutIpLocationDatabase { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. +// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response indicates that it was not completely +// acknowledged. 
+// A value of `-1` indicates that the request should never time out. +// API name: timeout +func (r *PutIpLocationDatabase) Timeout(duration string) *PutIpLocationDatabase { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutIpLocationDatabase) ErrorTrace(errortrace bool) *PutIpLocationDatabase { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutIpLocationDatabase) FilterPath(filterpaths ...string) *PutIpLocationDatabase { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutIpLocationDatabase) Human(human bool) *PutIpLocationDatabase { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutIpLocationDatabase) Pretty(pretty bool) *PutIpLocationDatabase { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: AdditionalDatabaseConfigurationProperty +// AdditionalDatabaseConfigurationProperty is a single key dictionnary. +// It will replace the current value on each call. 
+func (r *PutIpLocationDatabase) AdditionalDatabaseConfigurationProperty(key string, value json.RawMessage) *PutIpLocationDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + r.req.AdditionalDatabaseConfigurationProperty = tmp + return r +} + +// API name: ipinfo +func (r *PutIpLocationDatabase) Ipinfo(ipinfo types.IpinfoVariant) *PutIpLocationDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Ipinfo = ipinfo.IpinfoCaster() + + return r +} + +// API name: maxmind +func (r *PutIpLocationDatabase) Maxmind(maxmind types.MaxmindVariant) *PutIpLocationDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Maxmind = maxmind.MaxmindCaster() + + return r +} + +// The provider-assigned name of the IP geolocation database to download. +// API name: name +func (r *PutIpLocationDatabase) Name(name string) *PutIpLocationDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = name + + return r +} diff --git a/typedapi/ingest/putiplocationdatabase/request.go b/typedapi/ingest/putiplocationdatabase/request.go new file mode 100644 index 0000000000..cd5af3ff75 --- /dev/null +++ b/typedapi/ingest/putiplocationdatabase/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package putiplocationdatabase + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package putiplocationdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/put_ip_location_database/PutIpLocationDatabaseRequest.ts#L25-L62 +type Request = types.DatabaseConfiguration + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewDatabaseConfiguration() + + return r +} diff --git a/typedapi/ingest/putiplocationdatabase/response.go b/typedapi/ingest/putiplocationdatabase/response.go new file mode 100644 index 0000000000..ef3f383450 --- /dev/null +++ b/typedapi/ingest/putiplocationdatabase/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package putiplocationdatabase + +// Response holds the response body struct for the package putiplocationdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/put_ip_location_database/PutIpLocationDatabaseResponse.ts#L22-L24 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/ingest/putpipeline/put_pipeline.go b/typedapi/ingest/putpipeline/put_pipeline.go index 26fb01f8a9..5df900d5c8 100644 --- a/typedapi/ingest/putpipeline/put_pipeline.go +++ b/typedapi/ingest/putpipeline/put_pipeline.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates or updates an ingest pipeline. +// Create or update a pipeline. // Changes made using this API take effect immediately. 
package putpipeline @@ -82,7 +82,7 @@ func NewPutPipelineFunc(tp elastictransport.Interface) NewPutPipeline { } } -// Creates or updates an ingest pipeline. +// Create or update a pipeline. // Changes made using this API take effect immediately. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html @@ -93,8 +93,6 @@ func New(tp elastictransport.Interface) *PutPipeline { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -387,51 +385,95 @@ func (r *PutPipeline) Pretty(pretty bool) *PutPipeline { return r } -// Description Description of the ingest pipeline. +// Marks this ingest pipeline as deprecated. +// When a deprecated ingest pipeline is referenced as the default or final +// pipeline when creating or updating a non-deprecated index template, +// Elasticsearch will emit a deprecation warning. +// API name: deprecated +func (r *PutPipeline) Deprecated(deprecated bool) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Deprecated = &deprecated + + return r +} + +// Description of the ingest pipeline. // API name: description func (r *PutPipeline) Description(description string) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Meta_ Optional metadata about the ingest pipeline. May have any contents. This map +// Optional metadata about the ingest pipeline. May have any contents. This map // is not automatically generated by Elasticsearch. 
// API name: _meta -func (r *PutPipeline) Meta_(metadata types.Metadata) *PutPipeline { - r.req.Meta_ = metadata +func (r *PutPipeline) Meta_(metadata types.MetadataVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// OnFailure Processors to run immediately after a processor failure. Each processor +// Processors to run immediately after a processor failure. Each processor // supports a processor-level `on_failure` value. If a processor without an // `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as // a fallback. The processors in this parameter run sequentially in the order // specified. Elasticsearch will not attempt to run the pipeline's remaining // processors. // API name: on_failure -func (r *PutPipeline) OnFailure(onfailures ...types.ProcessorContainer) *PutPipeline { - r.req.OnFailure = onfailures +func (r *PutPipeline) OnFailure(onfailures ...types.ProcessorContainerVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range onfailures { + + r.req.OnFailure = append(r.req.OnFailure, *v.ProcessorContainerCaster()) + } return r } -// Processors Processors used to perform transformations on documents before indexing. +// Processors used to perform transformations on documents before indexing. // Processors run sequentially in the order specified. 
// API name: processors -func (r *PutPipeline) Processors(processors ...types.ProcessorContainer) *PutPipeline { - r.req.Processors = processors +func (r *PutPipeline) Processors(processors ...types.ProcessorContainerVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range processors { + r.req.Processors = append(r.req.Processors, *v.ProcessorContainerCaster()) + + } return r } -// Version Version number used by external systems to track ingest pipelines. This +// Version number used by external systems to track ingest pipelines. This // parameter is intended for external systems only. Elasticsearch does not use // or validate pipeline version numbers. // API name: version func (r *PutPipeline) Version(versionnumber int64) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &versionnumber return r diff --git a/typedapi/ingest/putpipeline/request.go b/typedapi/ingest/putpipeline/request.go index 66dba321c9..b83a306839 100644 --- a/typedapi/ingest/putpipeline/request.go +++ b/typedapi/ingest/putpipeline/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putpipeline @@ -33,9 +33,14 @@ import ( // Request holds the request body struct for the package putpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/put_pipeline/PutPipelineRequest.ts#L25-L77 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/put_pipeline/PutPipelineRequest.ts#L25-L90 type Request struct { + // Deprecated Marks this ingest pipeline as deprecated. + // When a deprecated ingest pipeline is referenced as the default or final + // pipeline when creating or updating a non-deprecated index template, + // Elasticsearch will emit a deprecation warning. + Deprecated *bool `json:"deprecated,omitempty"` // Description Description of the ingest pipeline. Description *string `json:"description,omitempty"` // Meta_ Optional metadata about the ingest pipeline. May have any contents. This map @@ -90,6 +95,20 @@ func (s *Request) UnmarshalJSON(data []byte) error { switch t { + case "deprecated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deprecated", err) + } + s.Deprecated = &value + case bool: + s.Deprecated = &v + } + case "description": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { diff --git a/typedapi/ingest/putpipeline/response.go b/typedapi/ingest/putpipeline/response.go index f24c02fe16..ccfc6a229d 100644 --- a/typedapi/ingest/putpipeline/response.go +++ b/typedapi/ingest/putpipeline/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putpipeline // Response holds the response body struct for the package putpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/put_pipeline/PutPipelineResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/put_pipeline/PutPipelineResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ingest/simulate/request.go b/typedapi/ingest/simulate/request.go index bef29c246a..ab9cf1fe9a 100644 --- a/typedapi/ingest/simulate/request.go +++ b/typedapi/ingest/simulate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package simulate @@ -29,13 +29,13 @@ import ( // Request holds the request body struct for the package simulate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/simulate/SimulatePipelineRequest.ts#L25-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/simulate/SimulatePipelineRequest.ts#L25-L72 type Request struct { // Docs Sample documents to test in the pipeline. - Docs []types.Document `json:"docs,omitempty"` - // Pipeline Pipeline to test. 
- // If you don’t specify the `pipeline` request path parameter, this parameter is + Docs []types.Document `json:"docs"` + // Pipeline The pipeline to test. + // If you don't specify the `pipeline` request path parameter, this parameter is // required. // If you specify both this and the request path parameter, the API only uses // the request path parameter. diff --git a/typedapi/ingest/simulate/response.go b/typedapi/ingest/simulate/response.go index e09cb17fa8..1def508f3a 100644 --- a/typedapi/ingest/simulate/response.go +++ b/typedapi/ingest/simulate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package simulate @@ -26,9 +26,9 @@ import ( // Response holds the response body struct for the package simulate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/simulate/SimulatePipelineResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/simulate/SimulatePipelineResponse.ts#L22-L24 type Response struct { - Docs []types.PipelineSimulation `json:"docs"` + Docs []types.SimulateDocumentResult `json:"docs"` } // NewResponse returns a Response diff --git a/typedapi/ingest/simulate/simulate.go b/typedapi/ingest/simulate/simulate.go index 15dc9dbbde..53faa59c55 100644 --- a/typedapi/ingest/simulate/simulate.go +++ b/typedapi/ingest/simulate/simulate.go @@ -16,9 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Executes an ingest pipeline against a set of provided documents. +// Simulate a pipeline. +// +// Run an ingest pipeline against a set of provided documents. +// You can either specify an existing pipeline to use with the provided +// documents or supply a pipeline definition in the body of the request. package simulate import ( @@ -79,7 +83,11 @@ func NewSimulateFunc(tp elastictransport.Interface) NewSimulate { } } -// Executes an ingest pipeline against a set of provided documents. +// Simulate a pipeline. +// +// Run an ingest pipeline against a set of provided documents. +// You can either specify an existing pipeline to use with the provided +// documents or supply a pipeline definition in the body of the request. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/simulate-pipeline-api.html func New(tp elastictransport.Interface) *Simulate { @@ -89,8 +97,6 @@ func New(tp elastictransport.Interface) *Simulate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -315,8 +321,8 @@ func (r *Simulate) Header(key, value string) *Simulate { return r } -// Id Pipeline to test. -// If you don’t specify a `pipeline` in the request body, this parameter is +// Id The pipeline to test. +// If you don't specify a `pipeline` in the request body, this parameter is // required. // API Name: id func (r *Simulate) Id(id string) *Simulate { @@ -379,23 +385,34 @@ func (r *Simulate) Pretty(pretty bool) *Simulate { return r } -// Docs Sample documents to test in the pipeline. +// Sample documents to test in the pipeline. 
// API name: docs -func (r *Simulate) Docs(docs ...types.Document) *Simulate { - r.req.Docs = docs +func (r *Simulate) Docs(docs ...types.DocumentVariant) *Simulate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docs { + r.req.Docs = append(r.req.Docs, *v.DocumentCaster()) + + } return r } -// Pipeline Pipeline to test. -// If you don’t specify the `pipeline` request path parameter, this parameter is +// The pipeline to test. +// If you don't specify the `pipeline` request path parameter, this parameter is // required. // If you specify both this and the request path parameter, the API only uses // the request path parameter. // API name: pipeline -func (r *Simulate) Pipeline(pipeline *types.IngestPipeline) *Simulate { +func (r *Simulate) Pipeline(pipeline types.IngestPipelineVariant) *Simulate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pipeline = pipeline + r.req.Pipeline = pipeline.IngestPipelineCaster() return r } diff --git a/typedapi/license/delete/delete.go b/typedapi/license/delete/delete.go index 6093a70024..f6b8de3e1e 100644 --- a/typedapi/license/delete/delete.go +++ b/typedapi/license/delete/delete.go @@ -16,9 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes licensing information for the cluster +// Delete the license. +// +// When the license expires, your subscription level reverts to Basic. +// +// If the operator privileges feature is enabled, only operator users can use +// this API. 
package delete import ( @@ -68,7 +73,12 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } } -// Deletes licensing information for the cluster +// Delete the license. +// +// When the license expires, your subscription level reverts to Basic. +// +// If the operator privileges feature is enabled, only operator users can use +// this API. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-license.html func New(tp elastictransport.Interface) *Delete { @@ -274,6 +284,23 @@ func (r *Delete) Header(key, value string) *Delete { return r } +// MasterTimeout The period to wait for a connection to the master node. +// API name: master_timeout +func (r *Delete) MasterTimeout(duration string) *Delete { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. +// API name: timeout +func (r *Delete) Timeout(duration string) *Delete { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/license/delete/response.go b/typedapi/license/delete/response.go index 9c2aa7096f..cfce47a7b5 100644 --- a/typedapi/license/delete/response.go +++ b/typedapi/license/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/license/delete/DeleteLicenseResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/license/delete/DeleteLicenseResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/license/get/get.go b/typedapi/license/get/get.go index d53b7c456f..85e17ee4b6 100644 --- a/typedapi/license/get/get.go +++ b/typedapi/license/get/get.go @@ -16,13 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get license information. -// Returns information about your Elastic license, including its type, its -// status, when it was issued, and when it expires. -// For more information about the different types of licenses, refer to [Elastic -// Stack subscriptions](https://www.elastic.co/subscriptions). +// +// Get information about your Elastic license including its type, its status, +// when it was issued, and when it expires. +// +// >info +// > If the master node is generating a new cluster state, the get license API +// may return a `404 Not Found` response. +// > If you receive an unexpected 404 response after cluster startup, wait a +// short period and retry the request. 
package get import ( @@ -73,10 +78,15 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } // Get license information. -// Returns information about your Elastic license, including its type, its -// status, when it was issued, and when it expires. -// For more information about the different types of licenses, refer to [Elastic -// Stack subscriptions](https://www.elastic.co/subscriptions). +// +// Get information about your Elastic license including its type, its status, +// when it was issued, and when it expires. +// +// >info +// > If the master node is generating a new cluster state, the get license API +// may return a `404 Not Found` response. +// > If you receive an unexpected 404 response after cluster startup, wait a +// short period and retry the request. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-license.html func New(tp elastictransport.Interface) *Get { diff --git a/typedapi/license/get/response.go b/typedapi/license/get/response.go index 9aef6eced9..310b02f145 100644 --- a/typedapi/license/get/response.go +++ b/typedapi/license/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/license/get/GetLicenseResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/license/get/GetLicenseResponse.ts#L22-L24 type Response struct { License types.LicenseInformation `json:"license"` } diff --git a/typedapi/license/getbasicstatus/get_basic_status.go b/typedapi/license/getbasicstatus/get_basic_status.go index 996cba8869..4eb3ab2eef 100644 --- a/typedapi/license/getbasicstatus/get_basic_status.go +++ b/typedapi/license/getbasicstatus/get_basic_status.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves information about the status of the basic license. +// Get the basic license status. package getbasicstatus import ( @@ -68,7 +68,7 @@ func NewGetBasicStatusFunc(tp elastictransport.Interface) NewGetBasicStatus { } } -// Retrieves information about the status of the basic license. +// Get the basic license status. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html func New(tp elastictransport.Interface) *GetBasicStatus { diff --git a/typedapi/license/getbasicstatus/response.go b/typedapi/license/getbasicstatus/response.go index 688355aef5..6fbf11fb4b 100644 --- a/typedapi/license/getbasicstatus/response.go +++ b/typedapi/license/getbasicstatus/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getbasicstatus // Response holds the response body struct for the package getbasicstatus // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/license/get_basic_status/GetBasicLicenseStatusResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/license/get_basic_status/GetBasicLicenseStatusResponse.ts#L20-L22 type Response struct { EligibleToStartBasic bool `json:"eligible_to_start_basic"` } diff --git a/typedapi/license/gettrialstatus/get_trial_status.go b/typedapi/license/gettrialstatus/get_trial_status.go index e2f62eb026..3883d97f55 100644 --- a/typedapi/license/gettrialstatus/get_trial_status.go +++ b/typedapi/license/gettrialstatus/get_trial_status.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves information about the status of the trial license. +// Get the trial status. 
package gettrialstatus import ( @@ -68,7 +68,7 @@ func NewGetTrialStatusFunc(tp elastictransport.Interface) NewGetTrialStatus { } } -// Retrieves information about the status of the trial license. +// Get the trial status. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trial-status.html func New(tp elastictransport.Interface) *GetTrialStatus { diff --git a/typedapi/license/gettrialstatus/response.go b/typedapi/license/gettrialstatus/response.go index adc2e06036..d297e14995 100644 --- a/typedapi/license/gettrialstatus/response.go +++ b/typedapi/license/gettrialstatus/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package gettrialstatus // Response holds the response body struct for the package gettrialstatus // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/license/get_trial_status/GetTrialLicenseStatusResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/license/get_trial_status/GetTrialLicenseStatusResponse.ts#L20-L22 type Response struct { EligibleToStartTrial bool `json:"eligible_to_start_trial"` } diff --git a/typedapi/license/post/post.go b/typedapi/license/post/post.go index d74347b295..ee93757a06 100644 --- a/typedapi/license/post/post.go +++ b/typedapi/license/post/post.go @@ -16,9 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the license for the cluster. +// Update the license. +// +// You can update your license at runtime without shutting down your nodes. +// License updates take effect immediately. +// If the license you are installing does not support all of the features that +// were available with your previous license, however, you are notified in the +// response. +// You must then re-submit the API request with the acknowledge parameter set to +// true. +// +// NOTE: If Elasticsearch security features are enabled and you are installing a +// gold or higher license, you must enable TLS on the transport networking layer +// before you install the license. +// If the operator privileges feature is enabled, only operator users can use +// this API. package post import ( @@ -73,7 +87,21 @@ func NewPostFunc(tp elastictransport.Interface) NewPost { } } -// Updates the license for the cluster. +// Update the license. +// +// You can update your license at runtime without shutting down your nodes. +// License updates take effect immediately. +// If the license you are installing does not support all of the features that +// were available with your previous license, however, you are notified in the +// response. +// You must then re-submit the API request with the acknowledge parameter set to +// true. +// +// NOTE: If Elasticsearch security features are enabled and you are installing a +// gold or higher license, you must enable TLS on the transport networking layer +// before you install the license. +// If the operator privileges feature is enabled, only operator users can use +// this API. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-license.html func New(tp elastictransport.Interface) *Post { @@ -83,8 +111,6 @@ func New(tp elastictransport.Interface) *Post { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -298,6 +324,23 @@ func (r *Post) Acknowledge(acknowledge bool) *Post { return r } +// MasterTimeout The period to wait for a connection to the master node. +// API name: master_timeout +func (r *Post) MasterTimeout(duration string) *Post { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. +// API name: timeout +func (r *Post) Timeout(duration string) *Post { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -343,17 +386,28 @@ func (r *Post) Pretty(pretty bool) *Post { } // API name: license -func (r *Post) License(license *types.License) *Post { +func (r *Post) License(license types.LicenseVariant) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.License = license + r.req.License = license.LicenseCaster() return r } -// Licenses A sequence of one or more JSON documents containing the license information. +// A sequence of one or more JSON documents containing the license information. 
// API name: licenses -func (r *Post) Licenses(licenses ...types.License) *Post { - r.req.Licenses = licenses +func (r *Post) Licenses(licenses ...types.LicenseVariant) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range licenses { + r.req.Licenses = append(r.req.Licenses, *v.LicenseCaster()) + + } return r } diff --git a/typedapi/license/post/request.go b/typedapi/license/post/request.go index 716b42b932..2d27bf8d72 100644 --- a/typedapi/license/post/request.go +++ b/typedapi/license/post/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package post @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package post // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/license/post/PostLicenseRequest.ts#L23-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/license/post/PostLicenseRequest.ts#L24-L70 type Request struct { License *types.License `json:"license,omitempty"` // Licenses A sequence of one or more JSON documents containing the license information. diff --git a/typedapi/license/post/response.go b/typedapi/license/post/response.go index c8d90bea9e..cfd4139851 100644 --- a/typedapi/license/post/response.go +++ b/typedapi/license/post/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package post @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package post // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/license/post/PostLicenseResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/license/post/PostLicenseResponse.ts#L23-L29 type Response struct { Acknowledge *types.Acknowledgement `json:"acknowledge,omitempty"` Acknowledged bool `json:"acknowledged"` diff --git a/typedapi/license/poststartbasic/post_start_basic.go b/typedapi/license/poststartbasic/post_start_basic.go index 5bcb17a539..9e7bf9c895 100644 --- a/typedapi/license/poststartbasic/post_start_basic.go +++ b/typedapi/license/poststartbasic/post_start_basic.go @@ -16,15 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// The start basic API enables you to initiate an indefinite basic license, -// which gives access to all the basic features. If the basic license does not -// support all of the features that are available with your current license, -// however, you are notified in the response. You must then re-submit the API -// request with the acknowledge parameter set to true. -// To check the status of your basic license, use the following API: [Get basic -// status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Start a basic license. 
+// +// Start an indefinite basic license, which gives access to all the basic +// features. +// +// NOTE: In order to start a basic license, you must not currently have a basic +// license. +// +// If the basic license does not support all of the features that are available +// with your current license, however, you are notified in the response. +// You must then re-submit the API request with the `acknowledge` parameter set +// to `true`. +// +// To check the status of your basic license, use the get basic license API. package poststartbasic import ( @@ -74,13 +81,20 @@ func NewPostStartBasicFunc(tp elastictransport.Interface) NewPostStartBasic { } } -// The start basic API enables you to initiate an indefinite basic license, -// which gives access to all the basic features. If the basic license does not -// support all of the features that are available with your current license, -// however, you are notified in the response. You must then re-submit the API -// request with the acknowledge parameter set to true. -// To check the status of your basic license, use the following API: [Get basic -// status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). +// Start a basic license. +// +// Start an indefinite basic license, which gives access to all the basic +// features. +// +// NOTE: In order to start a basic license, you must not currently have a basic +// license. +// +// If the basic license does not support all of the features that are available +// with your current license, however, you are notified in the response. +// You must then re-submit the API request with the `acknowledge` parameter set +// to `true`. +// +// To check the status of your basic license, use the get basic license API. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-basic.html func New(tp elastictransport.Interface) *PostStartBasic { @@ -296,6 +310,23 @@ func (r *PostStartBasic) Acknowledge(acknowledge bool) *PostStartBasic { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PostStartBasic) MasterTimeout(duration string) *PostStartBasic { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *PostStartBasic) Timeout(duration string) *PostStartBasic { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/license/poststartbasic/response.go b/typedapi/license/poststartbasic/response.go index 3526be0e48..9d99c4105e 100644 --- a/typedapi/license/poststartbasic/response.go +++ b/typedapi/license/poststartbasic/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package poststartbasic @@ -33,7 +33,7 @@ import ( // Response holds the response body struct for the package poststartbasic // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/license/post_start_basic/StartBasicLicenseResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/license/post_start_basic/StartBasicLicenseResponse.ts#L23-L31 type Response struct { Acknowledge map[string][]string `json:"acknowledge,omitempty"` Acknowledged bool `json:"acknowledged"` diff --git a/typedapi/license/poststarttrial/post_start_trial.go b/typedapi/license/poststarttrial/post_start_trial.go index 2a7a1e1d93..149739d368 100644 --- a/typedapi/license/poststarttrial/post_start_trial.go +++ b/typedapi/license/poststarttrial/post_start_trial.go @@ -16,10 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// The start trial API enables you to start a 30-day trial, which gives access -// to all subscription features. +// Start a trial. +// Start a 30-day trial, which gives access to all subscription features. +// +// NOTE: You are allowed to start a trial only if your cluster has not already +// activated a trial for the current major product version. +// For example, if you have already activated a trial for v8.0, you cannot start +// a new trial until v9.0. You can, however, request an extended trial at +// https://www.elastic.co/trialextension. 
+// +// To check the status of your trial, use the get trial status API. package poststarttrial import ( @@ -69,8 +77,16 @@ func NewPostStartTrialFunc(tp elastictransport.Interface) NewPostStartTrial { } } -// The start trial API enables you to start a 30-day trial, which gives access -// to all subscription features. +// Start a trial. +// Start a 30-day trial, which gives access to all subscription features. +// +// NOTE: You are allowed to start a trial only if your cluster has not already +// activated a trial for the current major product version. +// For example, if you have already activated a trial for v8.0, you cannot start +// a new trial until v9.0. You can, however, request an extended trial at +// https://www.elastic.co/trialextension. +// +// To check the status of your trial, use the get trial status API. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trial.html func New(tp elastictransport.Interface) *PostStartTrial { @@ -293,6 +309,14 @@ func (r *PostStartTrial) TypeQueryString(typequerystring string) *PostStartTrial return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PostStartTrial) MasterTimeout(duration string) *PostStartTrial { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/license/poststarttrial/response.go b/typedapi/license/poststarttrial/response.go index 9db4650f60..5a2b1f9342 100644 --- a/typedapi/license/poststarttrial/response.go +++ b/typedapi/license/poststarttrial/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package poststarttrial @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package poststarttrial // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/license/post_start_trial/StartTrialLicenseResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/license/post_start_trial/StartTrialLicenseResponse.ts#L22-L29 type Response struct { Acknowledged bool `json:"acknowledged"` ErrorMessage *string `json:"error_message,omitempty"` diff --git a/typedapi/logstash/deletepipeline/delete_pipeline.go b/typedapi/logstash/deletepipeline/delete_pipeline.go index e27169ab11..7806c86715 100644 --- a/typedapi/logstash/deletepipeline/delete_pipeline.go +++ b/typedapi/logstash/deletepipeline/delete_pipeline.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes a pipeline used for Logstash Central Management. +// Delete a Logstash pipeline. +// Delete a pipeline that is used for Logstash Central Management. +// If the request succeeds, you receive an empty response with an appropriate +// status code. package deletepipeline import ( @@ -74,7 +77,10 @@ func NewDeletePipelineFunc(tp elastictransport.Interface) NewDeletePipeline { } } -// Deletes a pipeline used for Logstash Central Management. +// Delete a Logstash pipeline. +// Delete a pipeline that is used for Logstash Central Management. 
+// If the request succeeds, you receive an empty response with an appropriate +// status code. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-delete-pipeline.html func New(tp elastictransport.Interface) *DeletePipeline { @@ -239,7 +245,7 @@ func (r *DeletePipeline) Header(key, value string) *DeletePipeline { return r } -// Id Identifier for the pipeline. +// Id An identifier for the pipeline. // API Name: id func (r *DeletePipeline) _id(id string) *DeletePipeline { r.paramSet |= idMask diff --git a/typedapi/logstash/getpipeline/get_pipeline.go b/typedapi/logstash/getpipeline/get_pipeline.go index 0ff6f9e5d1..bc7033eb03 100644 --- a/typedapi/logstash/getpipeline/get_pipeline.go +++ b/typedapi/logstash/getpipeline/get_pipeline.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves pipelines used for Logstash Central Management. +// Get Logstash pipelines. +// Get pipelines that are used for Logstash Central Management. package getpipeline import ( @@ -74,7 +75,8 @@ func NewGetPipelineFunc(tp elastictransport.Interface) NewGetPipeline { } } -// Retrieves pipelines used for Logstash Central Management. +// Get Logstash pipelines. +// Get pipelines that are used for Logstash Central Management. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-get-pipeline.html func New(tp elastictransport.Interface) *GetPipeline { @@ -295,7 +297,7 @@ func (r *GetPipeline) Header(key, value string) *GetPipeline { return r } -// Id Comma-separated list of pipeline identifiers. +// Id A comma-separated list of pipeline identifiers. 
// API Name: id func (r *GetPipeline) Id(id string) *GetPipeline { r.paramSet |= idMask diff --git a/typedapi/logstash/getpipeline/response.go b/typedapi/logstash/getpipeline/response.go index 82ef7be116..eccb4061e2 100644 --- a/typedapi/logstash/getpipeline/response.go +++ b/typedapi/logstash/getpipeline/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getpipeline @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/logstash/get_pipeline/LogstashGetPipelineResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/logstash/get_pipeline/LogstashGetPipelineResponse.ts#L24-L27 type Response map[string]types.LogstashPipeline diff --git a/typedapi/logstash/putpipeline/put_pipeline.go b/typedapi/logstash/putpipeline/put_pipeline.go index 8b3a870375..8fe91300da 100644 --- a/typedapi/logstash/putpipeline/put_pipeline.go +++ b/typedapi/logstash/putpipeline/put_pipeline.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates or updates a pipeline used for Logstash Central Management. +// Create or update a Logstash pipeline. +// +// Create a pipeline that is used for Logstash Central Management. +// If the specified pipeline exists, it is replaced. 
package putpipeline import ( @@ -81,7 +84,10 @@ func NewPutPipelineFunc(tp elastictransport.Interface) NewPutPipeline { } } -// Creates or updates a pipeline used for Logstash Central Management. +// Create or update a Logstash pipeline. +// +// Create a pipeline that is used for Logstash Central Management. +// If the specified pipeline exists, it is replaced. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-put-pipeline.html func New(tp elastictransport.Interface) *PutPipeline { @@ -91,8 +97,6 @@ func New(tp elastictransport.Interface) *PutPipeline { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -252,7 +256,7 @@ func (r *PutPipeline) Header(key, value string) *PutPipeline { return r } -// Id Identifier for the pipeline. +// Id An identifier for the pipeline. // API Name: id func (r *PutPipeline) _id(id string) *PutPipeline { r.paramSet |= idMask @@ -305,58 +309,82 @@ func (r *PutPipeline) Pretty(pretty bool) *PutPipeline { return r } -// Description Description of the pipeline. +// A description of the pipeline. // This description is not used by Elasticsearch or Logstash. // API name: description func (r *PutPipeline) Description(description string) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = description return r } -// LastModified Date the pipeline was last updated. -// Must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. +// The date the pipeline was last updated. +// It must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. 
// API name: last_modified -func (r *PutPipeline) LastModified(datetime types.DateTime) *PutPipeline { - r.req.LastModified = datetime +func (r *PutPipeline) LastModified(datetime types.DateTimeVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastModified = *datetime.DateTimeCaster() return r } -// Pipeline Configuration for the pipeline. +// The configuration for the pipeline. // API name: pipeline func (r *PutPipeline) Pipeline(pipeline string) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Pipeline = pipeline return r } -// PipelineMetadata Optional metadata about the pipeline. -// May have any contents. +// Optional metadata about the pipeline, which can have any contents. // This metadata is not generated or used by Elasticsearch or Logstash. // API name: pipeline_metadata -func (r *PutPipeline) PipelineMetadata(pipelinemetadata *types.PipelineMetadata) *PutPipeline { +func (r *PutPipeline) PipelineMetadata(pipelinemetadata types.PipelineMetadataVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PipelineMetadata = *pipelinemetadata + r.req.PipelineMetadata = *pipelinemetadata.PipelineMetadataCaster() return r } -// PipelineSettings Settings for the pipeline. -// Supports only flat keys in dot notation. +// Settings for the pipeline. +// It supports only flat keys in dot notation. 
// API name: pipeline_settings -func (r *PutPipeline) PipelineSettings(pipelinesettings *types.PipelineSettings) *PutPipeline { +func (r *PutPipeline) PipelineSettings(pipelinesettings types.PipelineSettingsVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PipelineSettings = *pipelinesettings + r.req.PipelineSettings = *pipelinesettings.PipelineSettingsCaster() return r } -// Username User who last updated the pipeline. +// The user who last updated the pipeline. // API name: username func (r *PutPipeline) Username(username string) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Username = username diff --git a/typedapi/logstash/putpipeline/request.go b/typedapi/logstash/putpipeline/request.go index 357b036d64..b12db33736 100644 --- a/typedapi/logstash/putpipeline/request.go +++ b/typedapi/logstash/putpipeline/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putpipeline @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package putpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/logstash/put_pipeline/LogstashPutPipelineRequest.ts#L24-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/logstash/put_pipeline/LogstashPutPipelineRequest.ts#L24-L51 type Request = types.LogstashPipeline // NewRequest returns a Request diff --git a/typedapi/migration/deprecations/deprecations.go b/typedapi/migration/deprecations/deprecations.go index acfc4e5fac..9b4f1b2254 100644 --- a/typedapi/migration/deprecations/deprecations.go +++ b/typedapi/migration/deprecations/deprecations.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves information about different cluster, node, and index level settings -// that use deprecated features that will be removed or changed in the next -// major version. +// Get deprecation information. +// Get information about different cluster, node, and index level settings that +// use deprecated features that will be removed or changed in the next major +// version. +// +// TIP: This APIs is designed for indirect use by the Upgrade Assistant. +// You are strongly recommended to use the Upgrade Assistant. 
package deprecations import ( @@ -76,9 +80,13 @@ func NewDeprecationsFunc(tp elastictransport.Interface) NewDeprecations { } } -// Retrieves information about different cluster, node, and index level settings -// that use deprecated features that will be removed or changed in the next -// major version. +// Get deprecation information. +// Get information about different cluster, node, and index level settings that +// use deprecated features that will be removed or changed in the next major +// version. +// +// TIP: This APIs is designed for indirect use by the Upgrade Assistant. +// You are strongly recommended to use the Upgrade Assistant. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-deprecation.html func New(tp elastictransport.Interface) *Deprecations { diff --git a/typedapi/migration/deprecations/response.go b/typedapi/migration/deprecations/response.go index 4f2db77814..7710b54ff7 100644 --- a/typedapi/migration/deprecations/response.go +++ b/typedapi/migration/deprecations/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deprecations @@ -26,18 +26,39 @@ import ( // Response holds the response body struct for the package deprecations // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/migration/deprecations/DeprecationInfoResponse.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/migration/deprecations/DeprecationInfoResponse.ts#L23-L54 type Response struct { + + // ClusterSettings Cluster-level deprecation warnings. 
ClusterSettings []types.Deprecation `json:"cluster_settings"` - IndexSettings map[string][]types.Deprecation `json:"index_settings"` - MlSettings []types.Deprecation `json:"ml_settings"` - NodeSettings []types.Deprecation `json:"node_settings"` + DataStreams map[string][]types.Deprecation `json:"data_streams"` + // IlmPolicies ILM policy warnings are sectioned off per policy. + IlmPolicies map[string][]types.Deprecation `json:"ilm_policies"` + // IndexSettings Index warnings are sectioned off per index and can be filtered using an + // index-pattern in the query. + // This section includes warnings for the backing indices of data streams + // specified in the request path. + IndexSettings map[string][]types.Deprecation `json:"index_settings"` + // MlSettings Machine learning-related deprecation warnings. + MlSettings []types.Deprecation `json:"ml_settings"` + // NodeSettings Node-level deprecation warnings. + // Since only a subset of your nodes might incorporate these settings, it is + // important to read the details section for more information about which nodes + // are affected. + NodeSettings []types.Deprecation `json:"node_settings"` + // Templates Template warnings are sectioned off per template and include deprecations for + // both component templates and + // index templates. 
+ Templates map[string][]types.Deprecation `json:"templates"` } // NewResponse returns a Response func NewResponse() *Response { r := &Response{ + DataStreams: make(map[string][]types.Deprecation, 0), + IlmPolicies: make(map[string][]types.Deprecation, 0), IndexSettings: make(map[string][]types.Deprecation, 0), + Templates: make(map[string][]types.Deprecation, 0), } return r } diff --git a/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go b/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go index 2cd52a7ff5..435f58c108 100644 --- a/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go +++ b/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go @@ -16,9 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Find out whether system features need to be upgraded or not +// Get feature migration information. +// Version upgrades sometimes require changes to how features store +// configuration information and data in system indices. +// Check which features need to be migrated and the status of any migrations +// that are in progress. +// +// TIP: This API is designed for indirect use by the Upgrade Assistant. +// You are strongly recommended to use the Upgrade Assistant. package getfeatureupgradestatus import ( @@ -68,9 +75,16 @@ func NewGetFeatureUpgradeStatusFunc(tp elastictransport.Interface) NewGetFeature } } -// Find out whether system features need to be upgraded or not +// Get feature migration information. +// Version upgrades sometimes require changes to how features store +// configuration information and data in system indices. 
+// Check which features need to be migrated and the status of any migrations +// that are in progress. +// +// TIP: This API is designed for indirect use by the Upgrade Assistant. +// You are strongly recommended to use the Upgrade Assistant. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/feature-migration-api.html func New(tp elastictransport.Interface) *GetFeatureUpgradeStatus { r := &GetFeatureUpgradeStatus{ transport: tp, diff --git a/typedapi/migration/getfeatureupgradestatus/response.go b/typedapi/migration/getfeatureupgradestatus/response.go index 9f4a740a3b..44336ab020 100644 --- a/typedapi/migration/getfeatureupgradestatus/response.go +++ b/typedapi/migration/getfeatureupgradestatus/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getfeatureupgradestatus @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package getfeatureupgradestatus // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L23-L28 type Response struct { Features []types.GetMigrationFeature `json:"features"` MigrationStatus migrationstatus.MigrationStatus `json:"migration_status"` diff --git a/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go b/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go index 
b4d59f098a..ee1982ae8f 100644 --- a/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go +++ b/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go @@ -16,9 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Begin upgrades for system features +// Start the feature migration. +// Version upgrades sometimes require changes to how features store +// configuration information and data in system indices. +// This API starts the automatic migration process. +// +// Some functionality might be temporarily unavailable during the migration +// process. +// +// TIP: The API is designed for indirect use by the Upgrade Assistant. We +// strongly recommend you use the Upgrade Assistant. package postfeatureupgrade import ( @@ -68,9 +77,18 @@ func NewPostFeatureUpgradeFunc(tp elastictransport.Interface) NewPostFeatureUpgr } } -// Begin upgrades for system features +// Start the feature migration. +// Version upgrades sometimes require changes to how features store +// configuration information and data in system indices. +// This API starts the automatic migration process. +// +// Some functionality might be temporarily unavailable during the migration +// process. +// +// TIP: The API is designed for indirect use by the Upgrade Assistant. We +// strongly recommend you use the Upgrade Assistant. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/feature-migration-api.html func New(tp elastictransport.Interface) *PostFeatureUpgrade { r := &PostFeatureUpgrade{ transport: tp, diff --git a/typedapi/migration/postfeatureupgrade/response.go b/typedapi/migration/postfeatureupgrade/response.go index a61acc4a1b..88e9a1deaa 100644 --- a/typedapi/migration/postfeatureupgrade/response.go +++ b/typedapi/migration/postfeatureupgrade/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package postfeatureupgrade @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package postfeatureupgrade // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L20-L25 type Response struct { Accepted bool `json:"accepted"` Features []types.PostMigrationFeature `json:"features"` diff --git a/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go b/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go index d5e8e4b4c0..6348758f8c 100644 --- a/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go +++ b/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go @@ -16,9 +16,10 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Clear trained model deployment cache. +// // Cache will be cleared on all nodes where the trained model is assigned. // A trained model deployment may have an inference cache enabled. // As requests are handled by each allocated node, their responses may be cached @@ -82,6 +83,7 @@ func NewClearTrainedModelDeploymentCacheFunc(tp elastictransport.Interface) NewC } // Clear trained model deployment cache. +// // Cache will be cleared on all nodes where the trained model is assigned. // A trained model deployment may have an inference cache enabled. // As requests are handled by each allocated node, their responses may be cached diff --git a/typedapi/ml/cleartrainedmodeldeploymentcache/response.go b/typedapi/ml/cleartrainedmodeldeploymentcache/response.go index 2c52ecc988..1a0cca89ae 100644 --- a/typedapi/ml/cleartrainedmodeldeploymentcache/response.go +++ b/typedapi/ml/cleartrainedmodeldeploymentcache/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package cleartrainedmodeldeploymentcache // Response holds the response body struct for the package cleartrainedmodeldeploymentcache // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/clear_trained_model_deployment_cache/MlClearTrainedModelDeploymentCacheResponse.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/clear_trained_model_deployment_cache/MlClearTrainedModelDeploymentCacheResponse.ts#L20-L24 type Response struct { Cleared bool `json:"cleared"` } diff --git a/typedapi/ml/closejob/close_job.go b/typedapi/ml/closejob/close_job.go index d16dd2d810..ac8b34da22 100644 --- a/typedapi/ml/closejob/close_job.go +++ b/typedapi/ml/closejob/close_job.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Close anomaly detection jobs. +// // A job can be opened and closed multiple times throughout its lifecycle. A // closed job cannot receive data or perform analysis operations, but you can // still explore and navigate results. @@ -98,6 +99,7 @@ func NewCloseJobFunc(tp elastictransport.Interface) NewCloseJob { } // Close anomaly detection jobs. +// // A job can be opened and closed multiple times throughout its lifecycle. A // closed job cannot receive data or perform analysis operations, but you can // still explore and navigate results. 
@@ -123,8 +125,6 @@ func New(tp elastictransport.Interface) *CloseJob { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -397,26 +397,41 @@ func (r *CloseJob) Pretty(pretty bool) *CloseJob { return r } -// AllowNoMatch Refer to the description for the `allow_no_match` query parameter. +// Refer to the description for the `allow_no_match` query parameter. // API name: allow_no_match func (r *CloseJob) AllowNoMatch(allownomatch bool) *CloseJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowNoMatch = &allownomatch return r } -// Force Refer to the descriptiion for the `force` query parameter. +// Refer to the descriptiion for the `force` query parameter. // API name: force func (r *CloseJob) Force(force bool) *CloseJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Force = &force return r } -// Timeout Refer to the description for the `timeout` query parameter. +// Refer to the description for the `timeout` query parameter. // API name: timeout -func (r *CloseJob) Timeout(duration types.Duration) *CloseJob { - r.req.Timeout = duration +func (r *CloseJob) Timeout(duration types.DurationVariant) *CloseJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/ml/closejob/request.go b/typedapi/ml/closejob/request.go index 1c5147a0e4..6d5cd41957 100644 --- a/typedapi/ml/closejob/request.go +++ b/typedapi/ml/closejob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package closejob @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package closejob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/close_job/MlCloseJobRequest.ts#L24-L77 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/close_job/MlCloseJobRequest.ts#L24-L85 type Request struct { // AllowNoMatch Refer to the description for the `allow_no_match` query parameter. diff --git a/typedapi/ml/closejob/response.go b/typedapi/ml/closejob/response.go index e54b0270e2..b6b2a36d86 100644 --- a/typedapi/ml/closejob/response.go +++ b/typedapi/ml/closejob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package closejob // Response holds the response body struct for the package closejob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/close_job/MlCloseJobResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/close_job/MlCloseJobResponse.ts#L20-L22 type Response struct { Closed bool `json:"closed"` } diff --git a/typedapi/ml/deletecalendar/delete_calendar.go b/typedapi/ml/deletecalendar/delete_calendar.go index 57e9e5de43..33ec0bf028 100644 --- a/typedapi/ml/deletecalendar/delete_calendar.go +++ b/typedapi/ml/deletecalendar/delete_calendar.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete a calendar. -// Removes all scheduled events from a calendar, then deletes it. +// +// Remove all scheduled events from a calendar, then delete it. package deletecalendar import ( @@ -78,7 +79,8 @@ func NewDeleteCalendarFunc(tp elastictransport.Interface) NewDeleteCalendar { } // Delete a calendar. -// Removes all scheduled events from a calendar, then deletes it. +// +// Remove all scheduled events from a calendar, then delete it. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar.html func New(tp elastictransport.Interface) *DeleteCalendar { diff --git a/typedapi/ml/deletecalendar/response.go b/typedapi/ml/deletecalendar/response.go index 0a62addc76..a08a516939 100644 --- a/typedapi/ml/deletecalendar/response.go +++ b/typedapi/ml/deletecalendar/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletecalendar // Response holds the response body struct for the package deletecalendar // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/delete_calendar/MlDeleteCalendarResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/delete_calendar/MlDeleteCalendarResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deletecalendarevent/delete_calendar_event.go b/typedapi/ml/deletecalendarevent/delete_calendar_event.go index 02d281d21f..656655f045 100644 --- a/typedapi/ml/deletecalendarevent/delete_calendar_event.go +++ b/typedapi/ml/deletecalendarevent/delete_calendar_event.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete events from a calendar. 
package deletecalendarevent diff --git a/typedapi/ml/deletecalendarevent/response.go b/typedapi/ml/deletecalendarevent/response.go index 0823f4d190..99eb0a2acf 100644 --- a/typedapi/ml/deletecalendarevent/response.go +++ b/typedapi/ml/deletecalendarevent/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletecalendarevent // Response holds the response body struct for the package deletecalendarevent // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/delete_calendar_event/MlDeleteCalendarEventResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/delete_calendar_event/MlDeleteCalendarEventResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deletecalendarjob/delete_calendar_job.go b/typedapi/ml/deletecalendarjob/delete_calendar_job.go index bfb414fcde..fdec1449a9 100644 --- a/typedapi/ml/deletecalendarjob/delete_calendar_job.go +++ b/typedapi/ml/deletecalendarjob/delete_calendar_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete anomaly jobs from a calendar. 
package deletecalendarjob diff --git a/typedapi/ml/deletecalendarjob/response.go b/typedapi/ml/deletecalendarjob/response.go index 664d3b5527..827107ae35 100644 --- a/typedapi/ml/deletecalendarjob/response.go +++ b/typedapi/ml/deletecalendarjob/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletecalendarjob @@ -31,7 +31,7 @@ import ( // Response holds the response body struct for the package deletecalendarjob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/delete_calendar_job/MlDeleteCalendarJobResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/delete_calendar_job/MlDeleteCalendarJobResponse.ts#L22-L31 type Response struct { // CalendarId A string that uniquely identifies a calendar. diff --git a/typedapi/ml/deletedatafeed/delete_datafeed.go b/typedapi/ml/deletedatafeed/delete_datafeed.go index cec39d36ea..d5eaa26c2a 100644 --- a/typedapi/ml/deletedatafeed/delete_datafeed.go +++ b/typedapi/ml/deletedatafeed/delete_datafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete a datafeed. 
package deletedatafeed diff --git a/typedapi/ml/deletedatafeed/response.go b/typedapi/ml/deletedatafeed/response.go index 3531b72f0f..afcdf76bb9 100644 --- a/typedapi/ml/deletedatafeed/response.go +++ b/typedapi/ml/deletedatafeed/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletedatafeed // Response holds the response body struct for the package deletedatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/delete_datafeed/MlDeleteDatafeedResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/delete_datafeed/MlDeleteDatafeedResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go b/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go index d996302978..d20ab24d77 100644 --- a/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go +++ b/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete a data frame analytics job. 
package deletedataframeanalytics diff --git a/typedapi/ml/deletedataframeanalytics/response.go b/typedapi/ml/deletedataframeanalytics/response.go index 6ef7c8a6cf..151e23a987 100644 --- a/typedapi/ml/deletedataframeanalytics/response.go +++ b/typedapi/ml/deletedataframeanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletedataframeanalytics // Response holds the response body struct for the package deletedataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/delete_data_frame_analytics/MlDeleteDataFrameAnalyticsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/delete_data_frame_analytics/MlDeleteDataFrameAnalyticsResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deleteexpireddata/delete_expired_data.go b/typedapi/ml/deleteexpireddata/delete_expired_data.go index b9ef8aa8ae..b8e13d9134 100644 --- a/typedapi/ml/deleteexpireddata/delete_expired_data.go +++ b/typedapi/ml/deleteexpireddata/delete_expired_data.go @@ -16,17 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete expired ML data. 
-// Deletes all job results, model snapshots and forecast data that have exceeded +// +// Delete all job results, model snapshots and forecast data that have exceeded // their retention days period. Machine learning state documents that are not // associated with any job are also deleted. // You can limit the request to a single or set of anomaly detection jobs by // using a job identifier, a group name, a comma-separated list of jobs, or a // wildcard expression. You can delete expired data for all anomaly detection -// jobs by using _all, by specifying * as the , or by omitting the -// . +// jobs by using `_all`, by specifying `*` as the ``, or by omitting the +// ``. package deleteexpireddata import ( @@ -88,14 +89,15 @@ func NewDeleteExpiredDataFunc(tp elastictransport.Interface) NewDeleteExpiredDat } // Delete expired ML data. -// Deletes all job results, model snapshots and forecast data that have exceeded +// +// Delete all job results, model snapshots and forecast data that have exceeded // their retention days period. Machine learning state documents that are not // associated with any job are also deleted. // You can limit the request to a single or set of anomaly detection jobs by // using a job identifier, a group name, a comma-separated list of jobs, or a // wildcard expression. You can delete expired data for all anomaly detection -// jobs by using _all, by specifying * as the , or by omitting the -// . +// jobs by using `_all`, by specifying `*` as the ``, or by omitting the +// ``. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-expired-data.html func New(tp elastictransport.Interface) *DeleteExpiredData { @@ -105,8 +107,6 @@ func New(tp elastictransport.Interface) *DeleteExpiredData { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -381,20 +381,29 @@ func (r *DeleteExpiredData) Pretty(pretty bool) *DeleteExpiredData { return r } -// RequestsPerSecond The desired requests per second for the deletion processes. The default +// The desired requests per second for the deletion processes. The default // behavior is no throttling. // API name: requests_per_second func (r *DeleteExpiredData) RequestsPerSecond(requestspersecond float32) *DeleteExpiredData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RequestsPerSecond = &requestspersecond return r } -// Timeout How long can the underlying delete processes run until they are canceled. +// How long can the underlying delete processes run until they are canceled. // API name: timeout -func (r *DeleteExpiredData) Timeout(duration types.Duration) *DeleteExpiredData { - r.req.Timeout = duration +func (r *DeleteExpiredData) Timeout(duration types.DurationVariant) *DeleteExpiredData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/ml/deleteexpireddata/request.go b/typedapi/ml/deleteexpireddata/request.go index 3bfc650b3d..b737399d10 100644 --- a/typedapi/ml/deleteexpireddata/request.go +++ b/typedapi/ml/deleteexpireddata/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleteexpireddata @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package deleteexpireddata // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/delete_expired_data/MlDeleteExpiredDataRequest.ts#L25-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/delete_expired_data/MlDeleteExpiredDataRequest.ts#L25-L85 type Request struct { // RequestsPerSecond The desired requests per second for the deletion processes. The default diff --git a/typedapi/ml/deleteexpireddata/response.go b/typedapi/ml/deleteexpireddata/response.go index bd9e7add95..e572ace45b 100644 --- a/typedapi/ml/deleteexpireddata/response.go +++ b/typedapi/ml/deleteexpireddata/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleteexpireddata // Response holds the response body struct for the package deleteexpireddata // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/delete_expired_data/MlDeleteExpiredDataResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/delete_expired_data/MlDeleteExpiredDataResponse.ts#L20-L22 type Response struct { Deleted bool `json:"deleted"` } diff --git a/typedapi/ml/deletefilter/delete_filter.go b/typedapi/ml/deletefilter/delete_filter.go index 4a8bdb4679..d75ae4aa42 100644 --- a/typedapi/ml/deletefilter/delete_filter.go +++ b/typedapi/ml/deletefilter/delete_filter.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete a filter. +// // If an anomaly detection job references the filter, you cannot delete the // filter. You must update or delete the job before you can delete the filter. package deletefilter @@ -79,6 +80,7 @@ func NewDeleteFilterFunc(tp elastictransport.Interface) NewDeleteFilter { } // Delete a filter. +// // If an anomaly detection job references the filter, you cannot delete the // filter. You must update or delete the job before you can delete the filter. 
// diff --git a/typedapi/ml/deletefilter/response.go b/typedapi/ml/deletefilter/response.go index ef4a154a9f..692b398c3f 100644 --- a/typedapi/ml/deletefilter/response.go +++ b/typedapi/ml/deletefilter/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletefilter // Response holds the response body struct for the package deletefilter // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/delete_filter/MlDeleteFilterResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/delete_filter/MlDeleteFilterResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deleteforecast/delete_forecast.go b/typedapi/ml/deleteforecast/delete_forecast.go index 7666c550e4..d94adc818f 100644 --- a/typedapi/ml/deleteforecast/delete_forecast.go +++ b/typedapi/ml/deleteforecast/delete_forecast.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete forecasts from a job. +// // By default, forecasts are retained for 14 days. You can specify a // different retention period with the `expires_in` parameter in the forecast // jobs API. 
The delete forecast API enables you to delete one or more @@ -84,6 +85,7 @@ func NewDeleteForecastFunc(tp elastictransport.Interface) NewDeleteForecast { } // Delete forecasts from a job. +// // By default, forecasts are retained for 14 days. You can specify a // different retention period with the `expires_in` parameter in the forecast // jobs API. The delete forecast API enables you to delete one or more diff --git a/typedapi/ml/deleteforecast/response.go b/typedapi/ml/deleteforecast/response.go index 9d31efad75..2e518152fe 100644 --- a/typedapi/ml/deleteforecast/response.go +++ b/typedapi/ml/deleteforecast/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleteforecast // Response holds the response body struct for the package deleteforecast // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/delete_forecast/MlDeleteForecastResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/delete_forecast/MlDeleteForecastResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deletejob/delete_job.go b/typedapi/ml/deletejob/delete_job.go index b9324ec699..40331dc046 100644 --- a/typedapi/ml/deletejob/delete_job.go +++ b/typedapi/ml/deletejob/delete_job.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete an anomaly detection job. +// // All job configuration, model state and results are deleted. // It is not currently possible to delete multiple jobs using wildcards or a // comma separated list. If you delete a job that has a datafeed, the request @@ -83,6 +84,7 @@ func NewDeleteJobFunc(tp elastictransport.Interface) NewDeleteJob { } // Delete an anomaly detection job. +// // All job configuration, model state and results are deleted. // It is not currently possible to delete multiple jobs using wildcards or a // comma separated list. If you delete a job that has a datafeed, the request diff --git a/typedapi/ml/deletejob/response.go b/typedapi/ml/deletejob/response.go index 5f6d22c0d2..dc99548241 100644 --- a/typedapi/ml/deletejob/response.go +++ b/typedapi/ml/deletejob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletejob // Response holds the response body struct for the package deletejob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/delete_job/MlDeleteJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/delete_job/MlDeleteJobResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go b/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go index c53037a835..4faa4bb30e 100644 --- a/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go +++ b/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete a model snapshot. +// // You cannot delete the active model snapshot. To delete that snapshot, first // revert to a different one. To identify the active model snapshot, refer to // the `model_snapshot_id` in the results from the get jobs API. @@ -85,6 +86,7 @@ func NewDeleteModelSnapshotFunc(tp elastictransport.Interface) NewDeleteModelSna } // Delete a model snapshot. +// // You cannot delete the active model snapshot. To delete that snapshot, first // revert to a different one. To identify the active model snapshot, refer to // the `model_snapshot_id` in the results from the get jobs API. diff --git a/typedapi/ml/deletemodelsnapshot/response.go b/typedapi/ml/deletemodelsnapshot/response.go index b23fba44de..5cc366c627 100644 --- a/typedapi/ml/deletemodelsnapshot/response.go +++ b/typedapi/ml/deletemodelsnapshot/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletemodelsnapshot // Response holds the response body struct for the package deletemodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/delete_model_snapshot/MlDeleteModelSnapshotResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/delete_model_snapshot/MlDeleteModelSnapshotResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deletetrainedmodel/delete_trained_model.go b/typedapi/ml/deletetrainedmodel/delete_trained_model.go index f8dfe76612..98c0fc353f 100644 --- a/typedapi/ml/deletetrainedmodel/delete_trained_model.go +++ b/typedapi/ml/deletetrainedmodel/delete_trained_model.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete an unreferenced trained model. +// // The request deletes a trained inference model that is not referenced by an // ingest pipeline. package deletetrainedmodel @@ -79,6 +80,7 @@ func NewDeleteTrainedModelFunc(tp elastictransport.Interface) NewDeleteTrainedMo } // Delete an unreferenced trained model. +// // The request deletes a trained inference model that is not referenced by an // ingest pipeline. // @@ -312,6 +314,15 @@ func (r *DeleteTrainedModel) Force(force bool) *DeleteTrainedModel { return r } +// Timeout Period to wait for a response. 
If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *DeleteTrainedModel) Timeout(duration string) *DeleteTrainedModel { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/ml/deletetrainedmodel/response.go b/typedapi/ml/deletetrainedmodel/response.go index 3bbaa3e137..4d6675734d 100644 --- a/typedapi/ml/deletetrainedmodel/response.go +++ b/typedapi/ml/deletetrainedmodel/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletetrainedmodel // Response holds the response body struct for the package deletetrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/delete_trained_model/MlDeleteTrainedModelResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/delete_trained_model/MlDeleteTrainedModelResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go b/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go index 87ef1a70c3..282a67f76b 100644 --- a/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go +++ b/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete a trained model alias. +// // This API deletes an existing model alias that refers to a trained model. If // the model alias is missing or refers to a model other than the one identified // by the `model_id`, this API returns an error. @@ -85,6 +86,7 @@ func NewDeleteTrainedModelAliasFunc(tp elastictransport.Interface) NewDeleteTrai } // Delete a trained model alias. +// // This API deletes an existing model alias that refers to a trained model. If // the model alias is missing or refers to a model other than the one identified // by the `model_id`, this API returns an error. diff --git a/typedapi/ml/deletetrainedmodelalias/response.go b/typedapi/ml/deletetrainedmodelalias/response.go index 556f94fe7d..7dc85e4a87 100644 --- a/typedapi/ml/deletetrainedmodelalias/response.go +++ b/typedapi/ml/deletetrainedmodelalias/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletetrainedmodelalias // Response holds the response body struct for the package deletetrainedmodelalias // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/delete_trained_model_alias/MlDeleteTrainedModelAliasResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/delete_trained_model_alias/MlDeleteTrainedModelAliasResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/ml/estimatemodelmemory/estimate_model_memory.go b/typedapi/ml/estimatemodelmemory/estimate_model_memory.go index f2035b73a0..9358e4744e 100644 --- a/typedapi/ml/estimatemodelmemory/estimate_model_memory.go +++ b/typedapi/ml/estimatemodelmemory/estimate_model_memory.go @@ -16,11 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Estimate job model memory usage. -// Makes an estimation of the memory usage for an anomaly detection job model. -// It is based on analysis configuration details for the job and cardinality +// +// Make an estimation of the memory usage for an anomaly detection job model. +// The estimate is based on analysis configuration details for the job and +// cardinality // estimates for the fields it references. package estimatemodelmemory @@ -77,11 +79,13 @@ func NewEstimateModelMemoryFunc(tp elastictransport.Interface) NewEstimateModelM } // Estimate job model memory usage. -// Makes an estimation of the memory usage for an anomaly detection job model. -// It is based on analysis configuration details for the job and cardinality +// +// Make an estimation of the memory usage for an anomaly detection job model. +// The estimate is based on analysis configuration details for the job and +// cardinality // estimates for the fields it references. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-apis.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-estimate-model-memory.html func New(tp elastictransport.Interface) *EstimateModelMemory { r := &EstimateModelMemory{ transport: tp, @@ -89,8 +93,6 @@ func New(tp elastictransport.Interface) *EstimateModelMemory { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -344,30 +346,55 @@ func (r *EstimateModelMemory) Pretty(pretty bool) *EstimateModelMemory { return r } -// AnalysisConfig For a list of the properties that you can specify in the +// For a list of the properties that you can specify in the // `analysis_config` component of the body of this API. // API name: analysis_config -func (r *EstimateModelMemory) AnalysisConfig(analysisconfig *types.AnalysisConfig) *EstimateModelMemory { +func (r *EstimateModelMemory) AnalysisConfig(analysisconfig types.AnalysisConfigVariant) *EstimateModelMemory { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalysisConfig = analysisconfig + r.req.AnalysisConfig = analysisconfig.AnalysisConfigCaster() return r } -// MaxBucketCardinality Estimates of the highest cardinality in a single bucket that is observed +// Estimates of the highest cardinality in a single bucket that is observed // for influencer fields over the time period that the job analyzes data. // To produce a good answer, values must be provided for all influencer // fields. Providing values for fields that are not listed as `influencers` // has no effect on the estimation. 
// API name: max_bucket_cardinality func (r *EstimateModelMemory) MaxBucketCardinality(maxbucketcardinality map[string]int64) *EstimateModelMemory { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxBucketCardinality = maxbucketcardinality + return r +} + +func (r *EstimateModelMemory) AddMaxBucketCardinality(key string, value int64) *EstimateModelMemory { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]int64 + if r.req.MaxBucketCardinality == nil { + r.req.MaxBucketCardinality = make(map[string]int64) + } else { + tmp = r.req.MaxBucketCardinality + } + + tmp[key] = value + r.req.MaxBucketCardinality = tmp return r } -// OverallCardinality Estimates of the cardinality that is observed for fields over the whole +// Estimates of the cardinality that is observed for fields over the whole // time period that the job analyzes data. To produce a good answer, values // must be provided for fields referenced in the `by_field_name`, // `over_field_name` and `partition_field_name` of any detectors. Providing @@ -376,8 +403,29 @@ func (r *EstimateModelMemory) MaxBucketCardinality(maxbucketcardinality map[stri // `over_field_name` or `partition_field_name`. 
// API name: overall_cardinality func (r *EstimateModelMemory) OverallCardinality(overallcardinality map[string]int64) *EstimateModelMemory { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.OverallCardinality = overallcardinality + return r +} + +func (r *EstimateModelMemory) AddOverallCardinality(key string, value int64) *EstimateModelMemory { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]int64 + if r.req.OverallCardinality == nil { + r.req.OverallCardinality = make(map[string]int64) + } else { + tmp = r.req.OverallCardinality + } + + tmp[key] = value + r.req.OverallCardinality = tmp return r } diff --git a/typedapi/ml/estimatemodelmemory/request.go b/typedapi/ml/estimatemodelmemory/request.go index 02d8771928..29130b5582 100644 --- a/typedapi/ml/estimatemodelmemory/request.go +++ b/typedapi/ml/estimatemodelmemory/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package estimatemodelmemory @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package estimatemodelmemory // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/estimate_model_memory/MlEstimateModelMemoryRequest.ts#L26-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/estimate_model_memory/MlEstimateModelMemoryRequest.ts#L26-L71 type Request struct { // AnalysisConfig For a list of the properties that you can specify in the diff --git a/typedapi/ml/estimatemodelmemory/response.go b/typedapi/ml/estimatemodelmemory/response.go index ffdb6ebf7d..b631cd348d 100644 --- a/typedapi/ml/estimatemodelmemory/response.go +++ b/typedapi/ml/estimatemodelmemory/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package estimatemodelmemory // Response holds the response body struct for the package estimatemodelmemory // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/estimate_model_memory/MlEstimateModelMemoryResponse.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/estimate_model_memory/MlEstimateModelMemoryResponse.ts#L20-L24 type Response struct { ModelMemoryEstimate string `json:"model_memory_estimate"` } diff --git a/typedapi/ml/evaluatedataframe/evaluate_data_frame.go b/typedapi/ml/evaluatedataframe/evaluate_data_frame.go index 2a5a873d35..eb7240c399 100644 --- a/typedapi/ml/evaluatedataframe/evaluate_data_frame.go +++ b/typedapi/ml/evaluatedataframe/evaluate_data_frame.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Evaluate data frame analytics. +// // The API packages together commonly used evaluation metrics for various types // of machine learning features. This has been designed for use on indexes // created by data frame analytics. Evaluation requires both a ground truth @@ -78,6 +79,7 @@ func NewEvaluateDataFrameFunc(tp elastictransport.Interface) NewEvaluateDataFram } // Evaluate data frame analytics. +// // The API packages together commonly used evaluation metrics for various types // of machine learning features. This has been designed for use on indexes // created by data frame analytics. 
Evaluation requires both a ground truth @@ -91,8 +93,6 @@ func New(tp elastictransport.Interface) *EvaluateDataFrame { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -346,28 +346,41 @@ func (r *EvaluateDataFrame) Pretty(pretty bool) *EvaluateDataFrame { return r } -// Evaluation Defines the type of evaluation you want to perform. +// Defines the type of evaluation you want to perform. // API name: evaluation -func (r *EvaluateDataFrame) Evaluation(evaluation *types.DataframeEvaluationContainer) *EvaluateDataFrame { +func (r *EvaluateDataFrame) Evaluation(evaluation types.DataframeEvaluationContainerVariant) *EvaluateDataFrame { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Evaluation = *evaluation + r.req.Evaluation = *evaluation.DataframeEvaluationContainerCaster() return r } -// Index Defines the `index` in which the evaluation will be performed. +// Defines the `index` in which the evaluation will be performed. // API name: index func (r *EvaluateDataFrame) Index(indexname string) *EvaluateDataFrame { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Index = indexname return r } -// Query A query clause that retrieves a subset of data from the source index. +// A query clause that retrieves a subset of data from the source index. 
// API name: query -func (r *EvaluateDataFrame) Query(query *types.Query) *EvaluateDataFrame { +func (r *EvaluateDataFrame) Query(query types.QueryVariant) *EvaluateDataFrame { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } diff --git a/typedapi/ml/evaluatedataframe/request.go b/typedapi/ml/evaluatedataframe/request.go index 631c9c38e1..7f888b3494 100644 --- a/typedapi/ml/evaluatedataframe/request.go +++ b/typedapi/ml/evaluatedataframe/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package evaluatedataframe @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package evaluatedataframe // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/MlEvaluateDataFrameRequest.ts#L25-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/MlEvaluateDataFrameRequest.ts#L25-L61 type Request struct { // Evaluation Defines the type of evaluation you want to perform. diff --git a/typedapi/ml/evaluatedataframe/response.go b/typedapi/ml/evaluatedataframe/response.go index 79a1205efa..7113f9b0f9 100644 --- a/typedapi/ml/evaluatedataframe/response.go +++ b/typedapi/ml/evaluatedataframe/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package evaluatedataframe @@ -26,11 +26,19 @@ import ( // Response holds the response body struct for the package evaluatedataframe // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/MlEvaluateDataFrameResponse.ts#L26-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/MlEvaluateDataFrameResponse.ts#L26-L44 type Response struct { - Classification *types.DataframeClassificationSummary `json:"classification,omitempty"` + + // Classification Evaluation results for a classification analysis. + // It outputs a prediction that identifies to which of the classes each document + // belongs. + Classification *types.DataframeClassificationSummary `json:"classification,omitempty"` + // OutlierDetection Evaluation results for an outlier detection analysis. + // It outputs the probability that each document is an outlier. OutlierDetection *types.DataframeOutlierDetectionSummary `json:"outlier_detection,omitempty"` - Regression *types.DataframeRegressionSummary `json:"regression,omitempty"` + // Regression Evaluation results for a regression analysis which outputs a prediction of + // values. + Regression *types.DataframeRegressionSummary `json:"regression,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go b/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go index 8bcaa2f27f..63ff2912d4 100644 --- a/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go +++ b/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go @@ -16,9 +16,10 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Explain data frame analytics config. +// // This API provides explanations for a data frame analytics config that either // exists already or one that has not been created yet. The following // explanations are provided: @@ -88,6 +89,7 @@ func NewExplainDataFrameAnalyticsFunc(tp elastictransport.Interface) NewExplainD } // Explain data frame analytics config. +// // This API provides explanations for a data frame analytics config that either // exists already or one that has not been created yet. The following // explanations are provided: @@ -97,7 +99,7 @@ func NewExplainDataFrameAnalyticsFunc(tp elastictransport.Interface) NewExplainD // If you have object fields or fields that are excluded via source filtering, // they are not included in the explanation. // -// http://www.elastic.co/guide/en/elasticsearch/reference/current/explain-dfanalytics.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/explain-dfanalytics.html func New(tp elastictransport.Interface) *ExplainDataFrameAnalytics { r := &ExplainDataFrameAnalytics{ transport: tp, @@ -105,8 +107,6 @@ func New(tp elastictransport.Interface) *ExplainDataFrameAnalytics { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -390,88 +390,122 @@ func (r *ExplainDataFrameAnalytics) Pretty(pretty bool) *ExplainDataFrameAnalyti return r } -// AllowLazyStart Specifies whether this job can start when there is insufficient machine +// Specifies whether this job can start when there is insufficient machine // learning node capacity for it to be immediately assigned to a node. 
// API name: allow_lazy_start func (r *ExplainDataFrameAnalytics) AllowLazyStart(allowlazystart bool) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowLazyStart = &allowlazystart return r } -// Analysis The analysis configuration, which contains the information necessary to +// The analysis configuration, which contains the information necessary to // perform one of the following types of analysis: classification, outlier // detection, or regression. // API name: analysis -func (r *ExplainDataFrameAnalytics) Analysis(analysis *types.DataframeAnalysisContainer) *ExplainDataFrameAnalytics { +func (r *ExplainDataFrameAnalytics) Analysis(analysis types.DataframeAnalysisContainerVariant) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Analysis = analysis + r.req.Analysis = analysis.DataframeAnalysisContainerCaster() return r } -// AnalyzedFields Specify includes and/or excludes patterns to select which fields will be +// Specify includes and/or excludes patterns to select which fields will be // included in the analysis. The patterns specified in excludes are applied // last, therefore excludes takes precedence. In other words, if the same // field is specified in both includes and excludes, then the field will not // be included in the analysis. 
// API name: analyzed_fields -func (r *ExplainDataFrameAnalytics) AnalyzedFields(analyzedfields *types.DataframeAnalysisAnalyzedFields) *ExplainDataFrameAnalytics { +func (r *ExplainDataFrameAnalytics) AnalyzedFields(analyzedfields types.DataframeAnalysisAnalyzedFieldsVariant) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalyzedFields = analyzedfields + r.req.AnalyzedFields = analyzedfields.DataframeAnalysisAnalyzedFieldsCaster() return r } -// Description A description of the job. +// A description of the job. // API name: description func (r *ExplainDataFrameAnalytics) Description(description string) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Dest The destination configuration, consisting of index and optionally +// The destination configuration, consisting of index and optionally // results_field (ml by default). // API name: dest -func (r *ExplainDataFrameAnalytics) Dest(dest *types.DataframeAnalyticsDestination) *ExplainDataFrameAnalytics { +func (r *ExplainDataFrameAnalytics) Dest(dest types.DataframeAnalyticsDestinationVariant) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Dest = dest + r.req.Dest = dest.DataframeAnalyticsDestinationCaster() return r } -// MaxNumThreads The maximum number of threads to be used by the analysis. Using more +// The maximum number of threads to be used by the analysis. Using more // threads may decrease the time necessary to complete the analysis at the // cost of using more CPU. Note that the process may use additional threads // for operational functionality other than the analysis itself. 
// API name: max_num_threads func (r *ExplainDataFrameAnalytics) MaxNumThreads(maxnumthreads int) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxNumThreads = &maxnumthreads return r } -// ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for +// The approximate maximum amount of memory resources that are permitted for // analytical processing. If your `elasticsearch.yml` file contains an // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to // create data frame analytics jobs that have `model_memory_limit` values // greater than that setting. // API name: model_memory_limit func (r *ExplainDataFrameAnalytics) ModelMemoryLimit(modelmemorylimit string) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelMemoryLimit = &modelmemorylimit return r } -// Source The configuration of how to source the analysis data. It requires an +// The configuration of how to source the analysis data. It requires an // index. Optionally, query and _source may be specified. // API name: source -func (r *ExplainDataFrameAnalytics) Source(source *types.DataframeAnalyticsSource) *ExplainDataFrameAnalytics { +func (r *ExplainDataFrameAnalytics) Source(source types.DataframeAnalyticsSourceVariant) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source = source + r.req.Source = source.DataframeAnalyticsSourceCaster() return r } diff --git a/typedapi/ml/explaindataframeanalytics/request.go b/typedapi/ml/explaindataframeanalytics/request.go index d81279d591..6b2c816c80 100644 --- a/typedapi/ml/explaindataframeanalytics/request.go +++ b/typedapi/ml/explaindataframeanalytics/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package explaindataframeanalytics @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package explaindataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsRequest.ts#L30-L107 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsRequest.ts#L30-L120 type Request struct { // AllowLazyStart Specifies whether this job can start when there is insufficient machine diff --git a/typedapi/ml/explaindataframeanalytics/response.go b/typedapi/ml/explaindataframeanalytics/response.go index 9ad2497b76..77a615474d 100644 --- a/typedapi/ml/explaindataframeanalytics/response.go +++ b/typedapi/ml/explaindataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package explaindataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package explaindataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsResponse.ts#L25-L32 type Response struct { // FieldSelection An array of objects that explain selection for each field, sorted by the diff --git a/typedapi/ml/flushjob/flush_job.go b/typedapi/ml/flushjob/flush_job.go index 8ad2f499e0..7586331e16 100644 --- a/typedapi/ml/flushjob/flush_job.go +++ b/typedapi/ml/flushjob/flush_job.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Forces any buffered data to be processed by the job. +// Force buffered data to be processed. // The flush jobs API is only applicable when sending data for analysis using // the post data API. Depending on the content of the buffer, then it might // additionally calculate new results. Both flush and close operations are @@ -89,7 +89,7 @@ func NewFlushJobFunc(tp elastictransport.Interface) NewFlushJob { } } -// Forces any buffered data to be processed by the job. +// Force buffered data to be processed. // The flush jobs API is only applicable when sending data for analysis using // the post data API. 
Depending on the content of the buffer, then it might // additionally calculate new results. Both flush and close operations are @@ -107,8 +107,6 @@ func New(tp elastictransport.Interface) *FlushJob { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -377,42 +375,67 @@ func (r *FlushJob) Pretty(pretty bool) *FlushJob { return r } -// AdvanceTime Refer to the description for the `advance_time` query parameter. +// Refer to the description for the `advance_time` query parameter. // API name: advance_time -func (r *FlushJob) AdvanceTime(datetime types.DateTime) *FlushJob { - r.req.AdvanceTime = datetime +func (r *FlushJob) AdvanceTime(datetime types.DateTimeVariant) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AdvanceTime = *datetime.DateTimeCaster() return r } -// CalcInterim Refer to the description for the `calc_interim` query parameter. +// Refer to the description for the `calc_interim` query parameter. // API name: calc_interim func (r *FlushJob) CalcInterim(calcinterim bool) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.CalcInterim = &calcinterim return r } -// End Refer to the description for the `end` query parameter. +// Refer to the description for the `end` query parameter. // API name: end -func (r *FlushJob) End(datetime types.DateTime) *FlushJob { - r.req.End = datetime +func (r *FlushJob) End(datetime types.DateTimeVariant) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() return r } -// SkipTime Refer to the description for the `skip_time` query parameter. +// Refer to the description for the `skip_time` query parameter. 
// API name: skip_time -func (r *FlushJob) SkipTime(datetime types.DateTime) *FlushJob { - r.req.SkipTime = datetime +func (r *FlushJob) SkipTime(datetime types.DateTimeVariant) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SkipTime = *datetime.DateTimeCaster() return r } -// Start Refer to the description for the `start` query parameter. +// Refer to the description for the `start` query parameter. // API name: start -func (r *FlushJob) Start(datetime types.DateTime) *FlushJob { - r.req.Start = datetime +func (r *FlushJob) Start(datetime types.DateTimeVariant) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() return r } diff --git a/typedapi/ml/flushjob/request.go b/typedapi/ml/flushjob/request.go index 9dbdafaa2d..aca8ee8c32 100644 --- a/typedapi/ml/flushjob/request.go +++ b/typedapi/ml/flushjob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package flushjob @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package flushjob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/flush_job/MlFlushJobRequest.ts#L24-L99 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/flush_job/MlFlushJobRequest.ts#L24-L107 type Request struct { // AdvanceTime Refer to the description for the `advance_time` query parameter. 
diff --git a/typedapi/ml/flushjob/response.go b/typedapi/ml/flushjob/response.go index 97c257a9e5..8e57476ba4 100644 --- a/typedapi/ml/flushjob/response.go +++ b/typedapi/ml/flushjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package flushjob // Response holds the response body struct for the package flushjob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/flush_job/MlFlushJobResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/flush_job/MlFlushJobResponse.ts#L22-L31 type Response struct { Flushed bool `json:"flushed"` // LastFinalizedBucketEnd Provides the timestamp (in milliseconds since the epoch) of the end of diff --git a/typedapi/ml/forecast/forecast.go b/typedapi/ml/forecast/forecast.go index edcd08b518..e3c87da36d 100644 --- a/typedapi/ml/forecast/forecast.go +++ b/typedapi/ml/forecast/forecast.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Predicts the future behavior of a time series by using its historical -// behavior. +// Predict future behavior of a time series. // // Forecasts are not supported for jobs that perform population analysis; an // error occurs if you try to create a forecast for a job that has an -// `over_field_name` in its configuration. +// `over_field_name` in its configuration. 
Forecasts predict future behavior +based on historical data. package forecast import ( @@ -86,12 +86,12 @@ func NewForecastFunc(tp elastictransport.Interface) NewForecast { } } -// Predicts the future behavior of a time series by using its historical -// behavior. +// Predict future behavior of a time series. // // Forecasts are not supported for jobs that perform population analysis; an // error occurs if you try to create a forecast for a job that has an -// `over_field_name` in its configuration. +// `over_field_name` in its configuration. Forecasts predict future behavior +based on historical data. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-forecast.html func New(tp elastictransport.Interface) *Forecast { @@ -101,8 +101,6 @@ func New(tp elastictransport.Interface) *Forecast { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -372,25 +370,39 @@ func (r *Forecast) Pretty(pretty bool) *Forecast { return r } -// Duration Refer to the description for the `duration` query parameter. +// Refer to the description for the `duration` query parameter. // API name: duration -func (r *Forecast) Duration(duration types.Duration) *Forecast { - r.req.Duration = duration +func (r *Forecast) Duration(duration types.DurationVariant) *Forecast { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Duration = *duration.DurationCaster() return r } -// ExpiresIn Refer to the description for the `expires_in` query parameter. +// Refer to the description for the `expires_in` query parameter. 
// API name: expires_in -func (r *Forecast) ExpiresIn(duration types.Duration) *Forecast { - r.req.ExpiresIn = duration +func (r *Forecast) ExpiresIn(duration types.DurationVariant) *Forecast { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ExpiresIn = *duration.DurationCaster() return r } -// MaxModelMemory Refer to the description for the `max_model_memory` query parameter. +// Refer to the description for the `max_model_memory` query parameter. // API name: max_model_memory func (r *Forecast) MaxModelMemory(maxmodelmemory string) *Forecast { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.MaxModelMemory = &maxmodelmemory diff --git a/typedapi/ml/forecast/request.go b/typedapi/ml/forecast/request.go index 2432a524cf..07e5e9d1da 100644 --- a/typedapi/ml/forecast/request.go +++ b/typedapi/ml/forecast/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package forecast @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package forecast // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/forecast/MlForecastJobRequest.ts#L24-L87 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/forecast/MlForecastJobRequest.ts#L24-L95 type Request struct { // Duration Refer to the description for the `duration` query parameter. 
diff --git a/typedapi/ml/forecast/response.go b/typedapi/ml/forecast/response.go index d537ff8271..abc39e0810 100644 --- a/typedapi/ml/forecast/response.go +++ b/typedapi/ml/forecast/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package forecast // Response holds the response body struct for the package forecast // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/forecast/MlForecastJobResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/forecast/MlForecastJobResponse.ts#L22-L27 type Response struct { Acknowledged bool `json:"acknowledged"` ForecastId string `json:"forecast_id"` diff --git a/typedapi/ml/getbuckets/get_buckets.go b/typedapi/ml/getbuckets/get_buckets.go index 0a96d8cc9f..d047fca0b7 100644 --- a/typedapi/ml/getbuckets/get_buckets.go +++ b/typedapi/ml/getbuckets/get_buckets.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves anomaly detection job results for one or more buckets. +// Get anomaly detection job results for buckets. // The API presents a chronological view of the records, grouped by bucket. package getbuckets @@ -85,7 +85,7 @@ func NewGetBucketsFunc(tp elastictransport.Interface) NewGetBuckets { } } -// Retrieves anomaly detection job results for one or more buckets. +// Get anomaly detection job results for buckets. 
// The API presents a chronological view of the records, grouped by bucket. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html @@ -96,8 +96,6 @@ func New(tp elastictransport.Interface) *GetBuckets { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -417,67 +415,105 @@ func (r *GetBuckets) Pretty(pretty bool) *GetBuckets { return r } -// AnomalyScore Refer to the description for the `anomaly_score` query parameter. +// Refer to the description for the `anomaly_score` query parameter. // API name: anomaly_score func (r *GetBuckets) AnomalyScore(anomalyscore types.Float64) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.AnomalyScore = &anomalyscore return r } -// Desc Refer to the description for the `desc` query parameter. +// Refer to the description for the `desc` query parameter. // API name: desc func (r *GetBuckets) Desc(desc bool) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Desc = &desc return r } -// End Refer to the description for the `end` query parameter. +// Refer to the description for the `end` query parameter. // API name: end -func (r *GetBuckets) End(datetime types.DateTime) *GetBuckets { - r.req.End = datetime +func (r *GetBuckets) End(datetime types.DateTimeVariant) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() return r } -// ExcludeInterim Refer to the description for the `exclude_interim` query parameter. +// Refer to the description for the `exclude_interim` query parameter. 
// API name: exclude_interim func (r *GetBuckets) ExcludeInterim(excludeinterim bool) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ExcludeInterim = &excludeinterim return r } -// Expand Refer to the description for the `expand` query parameter. +// Refer to the description for the `expand` query parameter. // API name: expand func (r *GetBuckets) Expand(expand bool) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Expand = &expand return r } // API name: page -func (r *GetBuckets) Page(page *types.Page) *GetBuckets { +func (r *GetBuckets) Page(page types.PageVariant) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Page = page + r.req.Page = page.PageCaster() return r } -// Sort Refer to the desription for the `sort` query parameter. +// Refer to the description for the `sort` query parameter. // API name: sort func (r *GetBuckets) Sort(field string) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Sort = &field return r } -// Start Refer to the description for the `start` query parameter. +// Refer to the description for the `start` query parameter. // API name: start -func (r *GetBuckets) Start(datetime types.DateTime) *GetBuckets { - r.req.Start = datetime +func (r *GetBuckets) Start(datetime types.DateTimeVariant) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() return r } diff --git a/typedapi/ml/getbuckets/request.go b/typedapi/ml/getbuckets/request.go index 1a5ad20ea5..d2ae46988d 100644 --- a/typedapi/ml/getbuckets/request.go +++ b/typedapi/ml/getbuckets/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getbuckets @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package getbuckets // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_buckets/MlGetBucketsRequest.ts#L26-L133 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_buckets/MlGetBucketsRequest.ts#L26-L145 type Request struct { // AnomalyScore Refer to the description for the `anomaly_score` query parameter. diff --git a/typedapi/ml/getbuckets/response.go b/typedapi/ml/getbuckets/response.go index de8d396b39..1d5f8501ac 100644 --- a/typedapi/ml/getbuckets/response.go +++ b/typedapi/ml/getbuckets/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getbuckets @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getbuckets // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_buckets/MlGetBucketsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_buckets/MlGetBucketsResponse.ts#L23-L28 type Response struct { Buckets []types.BucketSummary `json:"buckets"` Count int64 `json:"count"` diff --git a/typedapi/ml/getcalendarevents/get_calendar_events.go b/typedapi/ml/getcalendarevents/get_calendar_events.go index 198aaf8162..6b7323d554 100644 --- a/typedapi/ml/getcalendarevents/get_calendar_events.go +++ b/typedapi/ml/getcalendarevents/get_calendar_events.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves information about the scheduled events in calendars. +// Get info about events in calendars. package getcalendarevents import ( @@ -76,7 +76,7 @@ func NewGetCalendarEventsFunc(tp elastictransport.Interface) NewGetCalendarEvent } } -// Retrieves information about the scheduled events in calendars. +// Get info about events in calendars. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar-event.html func New(tp elastictransport.Interface) *GetCalendarEvents { diff --git a/typedapi/ml/getcalendarevents/response.go b/typedapi/ml/getcalendarevents/response.go index 83015fb855..2584acb8bd 100644 --- a/typedapi/ml/getcalendarevents/response.go +++ b/typedapi/ml/getcalendarevents/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getcalendarevents @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getcalendarevents // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_calendar_events/MlGetCalendarEventsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_calendar_events/MlGetCalendarEventsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Events []types.CalendarEvent `json:"events"` diff --git a/typedapi/ml/getcalendars/get_calendars.go b/typedapi/ml/getcalendars/get_calendars.go index 8615f554e0..e56fd2970c 100644 --- a/typedapi/ml/getcalendars/get_calendars.go +++ b/typedapi/ml/getcalendars/get_calendars.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves configuration information for calendars. +// Get calendar configuration info. 
package getcalendars import ( @@ -79,7 +79,7 @@ func NewGetCalendarsFunc(tp elastictransport.Interface) NewGetCalendars { } } -// Retrieves configuration information for calendars. +// Get calendar configuration info. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar.html func New(tp elastictransport.Interface) *GetCalendars { @@ -89,8 +89,6 @@ func New(tp elastictransport.Interface) *GetCalendars { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -385,11 +383,15 @@ func (r *GetCalendars) Pretty(pretty bool) *GetCalendars { return r } -// Page This object is supported only when you omit the calendar identifier. +// This object is supported only when you omit the calendar identifier. // API name: page -func (r *GetCalendars) Page(page *types.Page) *GetCalendars { +func (r *GetCalendars) Page(page types.PageVariant) *GetCalendars { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Page = page + r.req.Page = page.PageCaster() return r } diff --git a/typedapi/ml/getcalendars/request.go b/typedapi/ml/getcalendars/request.go index 6f6f78800c..bebb7e9aec 100644 --- a/typedapi/ml/getcalendars/request.go +++ b/typedapi/ml/getcalendars/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getcalendars @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package getcalendars // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_calendars/MlGetCalendarsRequest.ts#L25-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_calendars/MlGetCalendarsRequest.ts#L25-L63 type Request struct { // Page This object is supported only when you omit the calendar identifier. diff --git a/typedapi/ml/getcalendars/response.go b/typedapi/ml/getcalendars/response.go index b196d05a69..2ecd0e944b 100644 --- a/typedapi/ml/getcalendars/response.go +++ b/typedapi/ml/getcalendars/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getcalendars @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getcalendars // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_calendars/MlGetCalendarsResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_calendars/MlGetCalendarsResponse.ts#L23-L25 type Response struct { Calendars []types.Calendar `json:"calendars"` Count int64 `json:"count"` diff --git a/typedapi/ml/getcategories/get_categories.go b/typedapi/ml/getcategories/get_categories.go index cb0e34e72e..de27061e04 100644 --- a/typedapi/ml/getcategories/get_categories.go +++ b/typedapi/ml/getcategories/get_categories.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves anomaly detection job results for one or more categories. +// Get anomaly detection job results for categories. package getcategories import ( @@ -84,7 +84,7 @@ func NewGetCategoriesFunc(tp elastictransport.Interface) NewGetCategories { } } -// Retrieves anomaly detection job results for one or more categories. +// Get anomaly detection job results for categories. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html func New(tp elastictransport.Interface) *GetCategories { @@ -94,8 +94,6 @@ func New(tp elastictransport.Interface) *GetCategories { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -426,12 +424,16 @@ func (r *GetCategories) Pretty(pretty bool) *GetCategories { return r } -// Page Configures pagination. +// Configures pagination. // This parameter has the `from` and `size` properties. // API name: page -func (r *GetCategories) Page(page *types.Page) *GetCategories { +func (r *GetCategories) Page(page types.PageVariant) *GetCategories { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Page = page + r.req.Page = page.PageCaster() return r } diff --git a/typedapi/ml/getcategories/request.go b/typedapi/ml/getcategories/request.go index dfe06da10d..c262b10ff1 100644 --- a/typedapi/ml/getcategories/request.go +++ b/typedapi/ml/getcategories/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getcategories @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package getcategories // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_categories/MlGetCategoriesRequest.ts#L25-L70 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_categories/MlGetCategoriesRequest.ts#L25-L82 type Request struct { // Page Configures pagination. 
diff --git a/typedapi/ml/getcategories/response.go b/typedapi/ml/getcategories/response.go index 7f44e61e83..1bd4a75e5b 100644 --- a/typedapi/ml/getcategories/response.go +++ b/typedapi/ml/getcategories/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getcategories @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getcategories // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_categories/MlGetCategoriesResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_categories/MlGetCategoriesResponse.ts#L23-L28 type Response struct { Categories []types.Category `json:"categories"` Count int64 `json:"count"` diff --git a/typedapi/ml/getdatafeeds/get_datafeeds.go b/typedapi/ml/getdatafeeds/get_datafeeds.go index 6fcef44844..fe9756ed9d 100644 --- a/typedapi/ml/getdatafeeds/get_datafeeds.go +++ b/typedapi/ml/getdatafeeds/get_datafeeds.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves configuration information for datafeeds. +// Get datafeeds configuration info. // You can get information for multiple datafeeds in a single API request by // using a comma-separated list of datafeeds or a wildcard expression. 
You can // get information for all datafeeds by using `_all`, by specifying `*` as the @@ -79,7 +79,7 @@ func NewGetDatafeedsFunc(tp elastictransport.Interface) NewGetDatafeeds { } } -// Retrieves configuration information for datafeeds. +// Get datafeeds configuration info. // You can get information for multiple datafeeds in a single API request by // using a comma-separated list of datafeeds or a wildcard expression. You can // get information for all datafeeds by using `_all`, by specifying `*` as the diff --git a/typedapi/ml/getdatafeeds/response.go b/typedapi/ml/getdatafeeds/response.go index 3223a4e031..8faf31aa90 100644 --- a/typedapi/ml/getdatafeeds/response.go +++ b/typedapi/ml/getdatafeeds/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getdatafeeds @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdatafeeds // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_datafeeds/MlGetDatafeedsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_datafeeds/MlGetDatafeedsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Datafeeds []types.MLDatafeed `json:"datafeeds"` diff --git a/typedapi/ml/getdatafeedstats/get_datafeed_stats.go b/typedapi/ml/getdatafeedstats/get_datafeed_stats.go index a2e8930fcb..7429e5c10c 100644 --- a/typedapi/ml/getdatafeedstats/get_datafeed_stats.go +++ b/typedapi/ml/getdatafeedstats/get_datafeed_stats.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves usage information for datafeeds. +// Get datafeeds usage info. // You can get statistics for multiple datafeeds in a single API request by // using a comma-separated list of datafeeds or a wildcard expression. You can // get statistics for all datafeeds by using `_all`, by specifying `*` as the @@ -80,7 +80,7 @@ func NewGetDatafeedStatsFunc(tp elastictransport.Interface) NewGetDatafeedStats } } -// Retrieves usage information for datafeeds. +// Get datafeeds usage info. // You can get statistics for multiple datafeeds in a single API request by // using a comma-separated list of datafeeds or a wildcard expression. You can // get statistics for all datafeeds by using `_all`, by specifying `*` as the diff --git a/typedapi/ml/getdatafeedstats/response.go b/typedapi/ml/getdatafeedstats/response.go index 32eb425527..523109b784 100644 --- a/typedapi/ml/getdatafeedstats/response.go +++ b/typedapi/ml/getdatafeedstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getdatafeedstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdatafeedstats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_datafeed_stats/MlGetDatafeedStatsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_datafeed_stats/MlGetDatafeedStatsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Datafeeds []types.DatafeedStats `json:"datafeeds"` diff --git a/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go b/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go index 104a13789f..75ba383713 100644 --- a/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go +++ b/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves configuration information for data frame analytics jobs. +// Get data frame analytics job configuration info. // You can get information for multiple data frame analytics jobs in a single // API request by using a comma-separated list of data frame analytics jobs or a // wildcard expression. @@ -77,7 +77,7 @@ func NewGetDataFrameAnalyticsFunc(tp elastictransport.Interface) NewGetDataFrame } } -// Retrieves configuration information for data frame analytics jobs. +// Get data frame analytics job configuration info. 
// You can get information for multiple data frame analytics jobs in a single // API request by using a comma-separated list of data frame analytics jobs or a // wildcard expression. diff --git a/typedapi/ml/getdataframeanalytics/response.go b/typedapi/ml/getdataframeanalytics/response.go index 0347b027c8..4716c6d4c1 100644 --- a/typedapi/ml/getdataframeanalytics/response.go +++ b/typedapi/ml/getdataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getdataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_data_frame_analytics/MlGetDataFrameAnalyticsResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_data_frame_analytics/MlGetDataFrameAnalyticsResponse.ts#L23-L29 type Response struct { Count int `json:"count"` // DataFrameAnalytics An array of data frame analytics job resources, which are sorted by the id diff --git a/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go b/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go index 929c390be3..7be516a0d4 100644 --- a/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go +++ b/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves usage information for data frame analytics jobs. +// Get data frame analytics jobs usage info. package getdataframeanalyticsstats import ( @@ -74,7 +74,7 @@ func NewGetDataFrameAnalyticsStatsFunc(tp elastictransport.Interface) NewGetData } } -// Retrieves usage information for data frame analytics jobs. +// Get data frame analytics jobs usage info. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics-stats.html func New(tp elastictransport.Interface) *GetDataFrameAnalyticsStats { diff --git a/typedapi/ml/getdataframeanalyticsstats/response.go b/typedapi/ml/getdataframeanalyticsstats/response.go index 8feb5b974f..6c1959d842 100644 --- a/typedapi/ml/getdataframeanalyticsstats/response.go +++ b/typedapi/ml/getdataframeanalyticsstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getdataframeanalyticsstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdataframeanalyticsstats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_data_frame_analytics_stats/MlGetDataFrameAnalyticsStatsResponse.ts#L24-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_data_frame_analytics_stats/MlGetDataFrameAnalyticsStatsResponse.ts#L23-L29 type Response struct { Count int64 `json:"count"` // DataFrameAnalytics An array of objects that contain usage information for data frame analytics diff --git a/typedapi/ml/getfilters/get_filters.go b/typedapi/ml/getfilters/get_filters.go index d26fd22644..25b383fc13 100644 --- a/typedapi/ml/getfilters/get_filters.go +++ b/typedapi/ml/getfilters/get_filters.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves filters. +// Get filters. // You can get a single filter or all filters. package getfilters @@ -75,7 +75,7 @@ func NewGetFiltersFunc(tp elastictransport.Interface) NewGetFilters { } } -// Retrieves filters. +// Get filters. // You can get a single filter or all filters. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-filter.html diff --git a/typedapi/ml/getfilters/response.go b/typedapi/ml/getfilters/response.go index e2d71672c5..ea9ca85dff 100644 --- a/typedapi/ml/getfilters/response.go +++ b/typedapi/ml/getfilters/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getfilters @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getfilters // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_filters/MlGetFiltersResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_filters/MlGetFiltersResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Filters []types.MLFilter `json:"filters"` diff --git a/typedapi/ml/getinfluencers/get_influencers.go b/typedapi/ml/getinfluencers/get_influencers.go index e00607e4cb..a0fc53c780 100644 --- a/typedapi/ml/getinfluencers/get_influencers.go +++ b/typedapi/ml/getinfluencers/get_influencers.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves anomaly detection job results for one or more influencers. +// Get anomaly detection job results for influencers. // Influencers are the entities that have contributed to, or are to blame for, // the anomalies. 
Influencer results are available only if an // `influencer_field_name` is specified in the job configuration. @@ -84,7 +84,7 @@ func NewGetInfluencersFunc(tp elastictransport.Interface) NewGetInfluencers { } } -// Retrieves anomaly detection job results for one or more influencers. +// Get anomaly detection job results for influencers. // Influencers are the entities that have contributed to, or are to blame for, // the anomalies. Influencer results are available only if an // `influencer_field_name` is specified in the job configuration. @@ -97,8 +97,6 @@ func New(tp elastictransport.Interface) *GetInfluencers { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -439,12 +437,16 @@ func (r *GetInfluencers) Pretty(pretty bool) *GetInfluencers { return r } -// Page Configures pagination. +// Configures pagination. // This parameter has the `from` and `size` properties. // API name: page -func (r *GetInfluencers) Page(page *types.Page) *GetInfluencers { +func (r *GetInfluencers) Page(page types.PageVariant) *GetInfluencers { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Page = page + r.req.Page = page.PageCaster() return r } diff --git a/typedapi/ml/getinfluencers/request.go b/typedapi/ml/getinfluencers/request.go index 1a5ad20ea5..d2ae46988d 100644 --- a/typedapi/ml/getinfluencers/request.go +++ b/typedapi/ml/getinfluencers/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getinfluencers @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package getinfluencers // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_influencers/MlGetInfluencersRequest.ts#L26-L97 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_influencers/MlGetInfluencersRequest.ts#L26-L105 type Request struct { // Page Configures pagination. diff --git a/typedapi/ml/getinfluencers/response.go b/typedapi/ml/getinfluencers/response.go index 7960c6cab8..06f1d2c2ac 100644 --- a/typedapi/ml/getinfluencers/response.go +++ b/typedapi/ml/getinfluencers/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getinfluencers @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getinfluencers // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_influencers/MlGetInfluencersResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_influencers/MlGetInfluencersResponse.ts#L23-L29 type Response struct { Count int64 `json:"count"` // Influencers Array of influencer objects diff --git a/typedapi/ml/getjobs/get_jobs.go b/typedapi/ml/getjobs/get_jobs.go index 49bcd1e40b..e43a45947d 100644 --- a/typedapi/ml/getjobs/get_jobs.go +++ b/typedapi/ml/getjobs/get_jobs.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves configuration information for anomaly detection jobs. +// Get anomaly detection jobs configuration info. // You can get information for multiple anomaly detection jobs in a single API // request by using a group name, a comma-separated list of jobs, or a wildcard // expression. You can get information for all anomaly detection jobs by using @@ -78,7 +78,7 @@ func NewGetJobsFunc(tp elastictransport.Interface) NewGetJobs { } } -// Retrieves configuration information for anomaly detection jobs. +// Get anomaly detection jobs configuration info. 
// You can get information for multiple anomaly detection jobs in a single API // request by using a group name, a comma-separated list of jobs, or a wildcard // expression. You can get information for all anomaly detection jobs by using diff --git a/typedapi/ml/getjobs/response.go b/typedapi/ml/getjobs/response.go index 4a574309f2..c45d3f7963 100644 --- a/typedapi/ml/getjobs/response.go +++ b/typedapi/ml/getjobs/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getjobs @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getjobs // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_jobs/MlGetJobsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_jobs/MlGetJobsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Jobs []types.Job `json:"jobs"` diff --git a/typedapi/ml/getjobstats/get_job_stats.go b/typedapi/ml/getjobstats/get_job_stats.go index e225ff0c69..79c8864315 100644 --- a/typedapi/ml/getjobstats/get_job_stats.go +++ b/typedapi/ml/getjobstats/get_job_stats.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves usage information for anomaly detection jobs. +// Get anomaly detection jobs usage info. 
package getjobstats import ( @@ -74,7 +74,7 @@ func NewGetJobStatsFunc(tp elastictransport.Interface) NewGetJobStats { } } -// Retrieves usage information for anomaly detection jobs. +// Get anomaly detection jobs usage info. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html func New(tp elastictransport.Interface) *GetJobStats { diff --git a/typedapi/ml/getjobstats/response.go b/typedapi/ml/getjobstats/response.go index 350e884adf..563f075dee 100644 --- a/typedapi/ml/getjobstats/response.go +++ b/typedapi/ml/getjobstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getjobstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getjobstats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_job_stats/MlGetJobStatsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_job_stats/MlGetJobStatsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Jobs []types.JobStats `json:"jobs"` diff --git a/typedapi/ml/getmemorystats/get_memory_stats.go b/typedapi/ml/getmemorystats/get_memory_stats.go index 70abd85868..99f10454de 100644 --- a/typedapi/ml/getmemorystats/get_memory_stats.go +++ b/typedapi/ml/getmemorystats/get_memory_stats.go @@ -16,8 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Get machine learning memory usage info. // Get information about how machine learning jobs and trained models are using // memory, // on each node, both within the JVM heap, and natively, outside of the JVM. @@ -76,6 +77,7 @@ func NewGetMemoryStatsFunc(tp elastictransport.Interface) NewGetMemoryStats { } } +// Get machine learning memory usage info. // Get information about how machine learning jobs and trained models are using // memory, // on each node, both within the JVM heap, and natively, outside of the JVM. @@ -314,16 +316,6 @@ func (r *GetMemoryStats) NodeId(nodeid string) *GetMemoryStats { return r } -// Human Specify this query parameter to include the fields with units in the -// response. Otherwise only -// the `_in_bytes` sizes are returned in the response. -// API name: human -func (r *GetMemoryStats) Human(human bool) *GetMemoryStats { - r.values.Set("human", strconv.FormatBool(human)) - - return r -} - // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout // expires, the request fails and returns an error. @@ -366,6 +358,19 @@ func (r *GetMemoryStats) FilterPath(filterpaths ...string) *GetMemoryStats { return r } +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetMemoryStats) Human(human bool) *GetMemoryStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + // Pretty If set to `true` the returned JSON will be "pretty-formatted". 
Only use // this option for debugging only. // API name: pretty diff --git a/typedapi/ml/getmemorystats/response.go b/typedapi/ml/getmemorystats/response.go index 29ddfea72b..688a06b074 100644 --- a/typedapi/ml/getmemorystats/response.go +++ b/typedapi/ml/getmemorystats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getmemorystats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getmemorystats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_memory_stats/MlGetMemoryStatsResponse.ts#L25-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_memory_stats/MlGetMemoryStatsResponse.ts#L25-L31 type Response struct { ClusterName string `json:"cluster_name"` NodeStats types.NodeStatistics `json:"_nodes"` diff --git a/typedapi/ml/getmodelsnapshots/get_model_snapshots.go b/typedapi/ml/getmodelsnapshots/get_model_snapshots.go index 4c0c685f41..c635c9a23d 100644 --- a/typedapi/ml/getmodelsnapshots/get_model_snapshots.go +++ b/typedapi/ml/getmodelsnapshots/get_model_snapshots.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves information about model snapshots. +// Get model snapshots info. 
package getmodelsnapshots import ( @@ -84,7 +84,7 @@ func NewGetModelSnapshotsFunc(tp elastictransport.Interface) NewGetModelSnapshot } } -// Retrieves information about model snapshots. +// Get model snapshots info. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html func New(tp elastictransport.Interface) *GetModelSnapshots { @@ -94,8 +94,6 @@ func New(tp elastictransport.Interface) *GetModelSnapshots { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -414,42 +412,66 @@ func (r *GetModelSnapshots) Pretty(pretty bool) *GetModelSnapshots { return r } -// Desc Refer to the description for the `desc` query parameter. +// Refer to the description for the `desc` query parameter. // API name: desc func (r *GetModelSnapshots) Desc(desc bool) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Desc = &desc return r } -// End Refer to the description for the `end` query parameter. +// Refer to the description for the `end` query parameter. // API name: end -func (r *GetModelSnapshots) End(datetime types.DateTime) *GetModelSnapshots { - r.req.End = datetime +func (r *GetModelSnapshots) End(datetime types.DateTimeVariant) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() return r } // API name: page -func (r *GetModelSnapshots) Page(page *types.Page) *GetModelSnapshots { +func (r *GetModelSnapshots) Page(page types.PageVariant) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Page = page + r.req.Page = page.PageCaster() return r } -// Sort Refer to the description for the `sort` query parameter. 
+// Refer to the description for the `sort` query parameter. // API name: sort func (r *GetModelSnapshots) Sort(field string) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Sort = &field return r } -// Start Refer to the description for the `start` query parameter. +// Refer to the description for the `start` query parameter. // API name: start -func (r *GetModelSnapshots) Start(datetime types.DateTime) *GetModelSnapshots { - r.req.Start = datetime +func (r *GetModelSnapshots) Start(datetime types.DateTimeVariant) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() return r } diff --git a/typedapi/ml/getmodelsnapshots/request.go b/typedapi/ml/getmodelsnapshots/request.go index 82df599e48..6058dce78e 100644 --- a/typedapi/ml/getmodelsnapshots/request.go +++ b/typedapi/ml/getmodelsnapshots/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getmodelsnapshots @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package getmodelsnapshots // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_model_snapshots/MlGetModelSnapshotsRequest.ts#L26-L96 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_model_snapshots/MlGetModelSnapshotsRequest.ts#L26-L108 type Request struct { // Desc Refer to the description for the `desc` query parameter. 
diff --git a/typedapi/ml/getmodelsnapshots/response.go b/typedapi/ml/getmodelsnapshots/response.go index 68316a4304..eb70e633ea 100644 --- a/typedapi/ml/getmodelsnapshots/response.go +++ b/typedapi/ml/getmodelsnapshots/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getmodelsnapshots @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getmodelsnapshots // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_model_snapshots/MlGetModelSnapshotsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_model_snapshots/MlGetModelSnapshotsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` ModelSnapshots []types.ModelSnapshot `json:"model_snapshots"` diff --git a/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go b/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go index e44e117938..7582dbabd6 100644 --- a/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go +++ b/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves usage information for anomaly detection job model snapshot -// upgrades. +// Get anomaly detection job model snapshot upgrade usage info. 
package getmodelsnapshotupgradestats import ( @@ -82,8 +81,7 @@ func NewGetModelSnapshotUpgradeStatsFunc(tp elastictransport.Interface) NewGetMo } } -// Retrieves usage information for anomaly detection job model snapshot -// upgrades. +// Get anomaly detection job model snapshot upgrade usage info. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-model-snapshot-upgrade-stats.html func New(tp elastictransport.Interface) *GetModelSnapshotUpgradeStats { diff --git a/typedapi/ml/getmodelsnapshotupgradestats/response.go b/typedapi/ml/getmodelsnapshotupgradestats/response.go index e3288bd45e..00088380c3 100644 --- a/typedapi/ml/getmodelsnapshotupgradestats/response.go +++ b/typedapi/ml/getmodelsnapshotupgradestats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getmodelsnapshotupgradestats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getmodelsnapshotupgradestats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_model_snapshot_upgrade_stats/MlGetModelSnapshotUpgradeStatsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_model_snapshot_upgrade_stats/MlGetModelSnapshotUpgradeStatsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` ModelSnapshotUpgrades []types.ModelSnapshotUpgrade `json:"model_snapshot_upgrades"` diff --git a/typedapi/ml/getoverallbuckets/get_overall_buckets.go b/typedapi/ml/getoverallbuckets/get_overall_buckets.go index a221f531fe..205f33efee 100644 --- a/typedapi/ml/getoverallbuckets/get_overall_buckets.go +++ 
b/typedapi/ml/getoverallbuckets/get_overall_buckets.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves overall bucket results that summarize the bucket results of +// Get overall bucket results. +// +// Retrieves overall bucket results that summarize the bucket results of // multiple anomaly detection jobs. // // The `overall_score` is calculated by combining the scores of all the @@ -97,7 +99,7 @@ func NewGetOverallBucketsFunc(tp elastictransport.Interface) NewGetOverallBucket } } -// Retrieves overall bucket results that summarize the bucket results of +// Get overall bucket results. +// +// Retrieves overall bucket results that summarize the bucket results of // multiple anomaly detection jobs. // // The `overall_score` is calculated by combining the scores of all the @@ -123,8 +127,6 @@ func New(tp elastictransport.Interface) *GetOverallBuckets { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -400,57 +402,92 @@ func (r *GetOverallBuckets) Pretty(pretty bool) *GetOverallBuckets { return r } -// AllowNoMatch Refer to the description for the `allow_no_match` query parameter. +// Refer to the description for the `allow_no_match` query parameter. // API name: allow_no_match func (r *GetOverallBuckets) AllowNoMatch(allownomatch bool) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowNoMatch = &allownomatch return r } -// BucketSpan Refer to the description for the `bucket_span` query parameter. +// Refer to the description for the `bucket_span` query parameter. 
// API name: bucket_span -func (r *GetOverallBuckets) BucketSpan(duration types.Duration) *GetOverallBuckets { - r.req.BucketSpan = duration +func (r *GetOverallBuckets) BucketSpan(duration types.DurationVariant) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.BucketSpan = *duration.DurationCaster() return r } -// End Refer to the description for the `end` query parameter. +// Refer to the description for the `end` query parameter. // API name: end -func (r *GetOverallBuckets) End(datetime types.DateTime) *GetOverallBuckets { - r.req.End = datetime +func (r *GetOverallBuckets) End(datetime types.DateTimeVariant) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() return r } -// ExcludeInterim Refer to the description for the `exclude_interim` query parameter. +// Refer to the description for the `exclude_interim` query parameter. // API name: exclude_interim func (r *GetOverallBuckets) ExcludeInterim(excludeinterim bool) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ExcludeInterim = &excludeinterim return r } -// OverallScore Refer to the description for the `overall_score` query parameter. +// Refer to the description for the `overall_score` query parameter. // API name: overall_score func (r *GetOverallBuckets) OverallScore(overallscore string) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.OverallScore = overallscore return r } -// Start Refer to the description for the `start` query parameter. +// Refer to the description for the `start` query parameter. 
// API name: start -func (r *GetOverallBuckets) Start(datetime types.DateTime) *GetOverallBuckets { - r.req.Start = datetime +func (r *GetOverallBuckets) Start(datetime types.DateTimeVariant) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() return r } -// TopN Refer to the description for the `top_n` query parameter. +// Refer to the description for the `top_n` query parameter. // API name: top_n func (r *GetOverallBuckets) TopN(topn int) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TopN = &topn return r diff --git a/typedapi/ml/getoverallbuckets/request.go b/typedapi/ml/getoverallbuckets/request.go index b2f2eb191f..9b2e2b953c 100644 --- a/typedapi/ml/getoverallbuckets/request.go +++ b/typedapi/ml/getoverallbuckets/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getoverallbuckets @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package getoverallbuckets // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_overall_buckets/MlGetOverallBucketsRequest.ts#L25-L143 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_overall_buckets/MlGetOverallBucketsRequest.ts#L25-L153 type Request struct { // AllowNoMatch Refer to the description for the `allow_no_match` query parameter. 
diff --git a/typedapi/ml/getoverallbuckets/response.go b/typedapi/ml/getoverallbuckets/response.go index f874addb1f..5c5b2b6a5d 100644 --- a/typedapi/ml/getoverallbuckets/response.go +++ b/typedapi/ml/getoverallbuckets/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getoverallbuckets @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getoverallbuckets // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_overall_buckets/MlGetOverallBucketsResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_overall_buckets/MlGetOverallBucketsResponse.ts#L23-L29 type Response struct { Count int64 `json:"count"` // OverallBuckets Array of overall bucket objects diff --git a/typedapi/ml/getrecords/get_records.go b/typedapi/ml/getrecords/get_records.go index 9a5449773a..0435e3e83a 100644 --- a/typedapi/ml/getrecords/get_records.go +++ b/typedapi/ml/getrecords/get_records.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves anomaly records for an anomaly detection job. +// Get anomaly records for an anomaly detection job. // Records contain the detailed analytical results. They describe the anomalous // activity that has been identified in the input data based on the detector // configuration. 
@@ -91,7 +91,7 @@ func NewGetRecordsFunc(tp elastictransport.Interface) NewGetRecords { } } -// Retrieves anomaly records for an anomaly detection job. +// Get anomaly records for an anomaly detection job. // Records contain the detailed analytical results. They describe the anomalous // activity that has been identified in the input data based on the detector // configuration. @@ -111,8 +111,6 @@ func New(tp elastictransport.Interface) *GetRecords { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -399,59 +397,92 @@ func (r *GetRecords) Pretty(pretty bool) *GetRecords { return r } -// Desc Refer to the description for the `desc` query parameter. +// Refer to the description for the `desc` query parameter. // API name: desc func (r *GetRecords) Desc(desc bool) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Desc = &desc return r } -// End Refer to the description for the `end` query parameter. +// Refer to the description for the `end` query parameter. // API name: end -func (r *GetRecords) End(datetime types.DateTime) *GetRecords { - r.req.End = datetime +func (r *GetRecords) End(datetime types.DateTimeVariant) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() return r } -// ExcludeInterim Refer to the description for the `exclude_interim` query parameter. +// Refer to the description for the `exclude_interim` query parameter. 
// API name: exclude_interim func (r *GetRecords) ExcludeInterim(excludeinterim bool) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ExcludeInterim = &excludeinterim return r } // API name: page -func (r *GetRecords) Page(page *types.Page) *GetRecords { +func (r *GetRecords) Page(page types.PageVariant) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Page = page + r.req.Page = page.PageCaster() return r } -// RecordScore Refer to the description for the `record_score` query parameter. +// Refer to the description for the `record_score` query parameter. // API name: record_score func (r *GetRecords) RecordScore(recordscore types.Float64) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RecordScore = &recordscore return r } -// Sort Refer to the description for the `sort` query parameter. +// Refer to the description for the `sort` query parameter. // API name: sort func (r *GetRecords) Sort(field string) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Sort = &field return r } -// Start Refer to the description for the `start` query parameter. +// Refer to the description for the `start` query parameter. 
// API name: start -func (r *GetRecords) Start(datetime types.DateTime) *GetRecords { - r.req.Start = datetime +func (r *GetRecords) Start(datetime types.DateTimeVariant) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() return r } diff --git a/typedapi/ml/getrecords/request.go b/typedapi/ml/getrecords/request.go index 832c40e4ff..7eece3f293 100644 --- a/typedapi/ml/getrecords/request.go +++ b/typedapi/ml/getrecords/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getrecords @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package getrecords // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_records/MlGetAnomalyRecordsRequest.ts#L26-L127 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_records/MlGetAnomalyRecordsRequest.ts#L26-L135 type Request struct { // Desc Refer to the description for the `desc` query parameter. diff --git a/typedapi/ml/getrecords/response.go b/typedapi/ml/getrecords/response.go index e749b501a9..dace3090b3 100644 --- a/typedapi/ml/getrecords/response.go +++ b/typedapi/ml/getrecords/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getrecords @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrecords // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_records/MlGetAnomalyRecordsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_records/MlGetAnomalyRecordsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Records []types.Anomaly `json:"records"` diff --git a/typedapi/ml/gettrainedmodels/get_trained_models.go b/typedapi/ml/gettrainedmodels/get_trained_models.go index a6a8748c89..038bbb0495 100644 --- a/typedapi/ml/gettrainedmodels/get_trained_models.go +++ b/typedapi/ml/gettrainedmodels/get_trained_models.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves configuration information for a trained model. +// Get trained model configuration info. package gettrainedmodels import ( @@ -75,7 +75,7 @@ func NewGetTrainedModelsFunc(tp elastictransport.Interface) NewGetTrainedModels } } -// Retrieves configuration information for a trained model. +// Get trained model configuration info. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models.html func New(tp elastictransport.Interface) *GetTrainedModels { @@ -360,6 +360,14 @@ func (r *GetTrainedModels) Include(include include.Include) *GetTrainedModels { return r } +// IncludeModelDefinition parameter is deprecated! Use [include=definition] instead +// API name: include_model_definition +func (r *GetTrainedModels) IncludeModelDefinition(includemodeldefinition bool) *GetTrainedModels { + r.values.Set("include_model_definition", strconv.FormatBool(includemodeldefinition)) + + return r +} + // Size Specifies the maximum number of models to obtain. // API name: size func (r *GetTrainedModels) Size(size int) *GetTrainedModels { diff --git a/typedapi/ml/gettrainedmodels/response.go b/typedapi/ml/gettrainedmodels/response.go index 888316ebc1..664b4feb1f 100644 --- a/typedapi/ml/gettrainedmodels/response.go +++ b/typedapi/ml/gettrainedmodels/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package gettrainedmodels @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettrainedmodels // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_trained_models/MlGetTrainedModelResponse.ts#L23-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_trained_models/MlGetTrainedModelResponse.ts#L23-L34 type Response struct { Count int `json:"count"` // TrainedModelConfigs An array of trained model resources, which are sorted by the model_id value diff --git a/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go b/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go index ee74ab5aaf..04d97261cc 100644 --- a/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go +++ b/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves usage information for trained models. You can get usage information -// for multiple trained +// Get trained models usage info. +// You can get usage information for multiple trained // models in a single API request by using a comma-separated list of model IDs // or a wildcard expression. package gettrainedmodelsstats @@ -77,8 +77,8 @@ func NewGetTrainedModelsStatsFunc(tp elastictransport.Interface) NewGetTrainedMo } } -// Retrieves usage information for trained models. 
You can get usage information -// for multiple trained +// Get trained models usage info. +// You can get usage information for multiple trained // models in a single API request by using a comma-separated list of model IDs // or a wildcard expression. // diff --git a/typedapi/ml/gettrainedmodelsstats/response.go b/typedapi/ml/gettrainedmodelsstats/response.go index 207aa6a933..79cf0c79e8 100644 --- a/typedapi/ml/gettrainedmodelsstats/response.go +++ b/typedapi/ml/gettrainedmodelsstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package gettrainedmodelsstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettrainedmodelsstats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_trained_models_stats/MlGetTrainedModelStatsResponse.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_trained_models_stats/MlGetTrainedModelStatsResponse.ts#L23-L33 type Response struct { // Count The total number of trained model statistics that matched the requested ID diff --git a/typedapi/ml/infertrainedmodel/infer_trained_model.go b/typedapi/ml/infertrainedmodel/infer_trained_model.go index 53b41dc133..462dcaf58d 100644 --- a/typedapi/ml/infertrainedmodel/infer_trained_model.go +++ b/typedapi/ml/infertrainedmodel/infer_trained_model.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Evaluates a trained model. +// Evaluate a trained model. package infertrainedmodel import ( @@ -81,7 +81,7 @@ func NewInferTrainedModelFunc(tp elastictransport.Interface) NewInferTrainedMode } } -// Evaluates a trained model. +// Evaluate a trained model. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html func New(tp elastictransport.Interface) *InferTrainedModel { @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *InferTrainedModel { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -386,23 +384,32 @@ func (r *InferTrainedModel) Pretty(pretty bool) *InferTrainedModel { return r } -// Docs An array of objects to pass to the model for inference. The objects should +// An array of objects to pass to the model for inference. The objects should // contain a fields matching your // configured trained model input. Typically, for NLP models, the field name is // `text_field`. // Currently, for NLP models, only a single value is allowed. 
// API name: docs -func (r *InferTrainedModel) Docs(docs ...map[string]json.RawMessage) *InferTrainedModel { +func (r *InferTrainedModel) Docs(docs []map[string]json.RawMessage) *InferTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Docs = docs return r } -// InferenceConfig The inference configuration updates to apply on the API call +// The inference configuration updates to apply on the API call // API name: inference_config -func (r *InferTrainedModel) InferenceConfig(inferenceconfig *types.InferenceConfigUpdateContainer) *InferTrainedModel { +func (r *InferTrainedModel) InferenceConfig(inferenceconfig types.InferenceConfigUpdateContainerVariant) *InferTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.InferenceConfig = inferenceconfig + r.req.InferenceConfig = inferenceconfig.InferenceConfigUpdateContainerCaster() return r } diff --git a/typedapi/ml/infertrainedmodel/request.go b/typedapi/ml/infertrainedmodel/request.go index b6d0e35feb..6f3b772123 100644 --- a/typedapi/ml/infertrainedmodel/request.go +++ b/typedapi/ml/infertrainedmodel/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package infertrainedmodel @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package infertrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/infer_trained_model/MlInferTrainedModelRequest.ts#L27-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/infer_trained_model/MlInferTrainedModelRequest.ts#L27-L72 type Request struct { // Docs An array of objects to pass to the model for inference. The objects should diff --git a/typedapi/ml/infertrainedmodel/response.go b/typedapi/ml/infertrainedmodel/response.go index d8e11f7b30..74d87e3690 100644 --- a/typedapi/ml/infertrainedmodel/response.go +++ b/typedapi/ml/infertrainedmodel/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package infertrainedmodel @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package infertrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/infer_trained_model/MlInferTrainedModelResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/infer_trained_model/MlInferTrainedModelResponse.ts#L22-L26 type Response struct { InferenceResults []types.InferenceResponseResult `json:"inference_results"` } diff --git a/typedapi/ml/info/info.go b/typedapi/ml/info/info.go index 4a5483fe20..1b80dc5bef 100644 --- a/typedapi/ml/info/info.go +++ b/typedapi/ml/info/info.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns defaults and limits used by machine learning. +// Get machine learning information. +// Get defaults and limits used by machine learning. // This endpoint is designed to be used by a user interface that needs to fully // understand machine learning configurations where some options are not // specified, meaning that the defaults should be used. This endpoint may be @@ -74,7 +75,8 @@ func NewInfoFunc(tp elastictransport.Interface) NewInfo { } } -// Returns defaults and limits used by machine learning. +// Get machine learning information. +// Get defaults and limits used by machine learning. 
// This endpoint is designed to be used by a user interface that needs to fully // understand machine learning configurations where some options are not // specified, meaning that the defaults should be used. This endpoint may be diff --git a/typedapi/ml/info/response.go b/typedapi/ml/info/response.go index 550838a6c6..0bfb2082b2 100644 --- a/typedapi/ml/info/response.go +++ b/typedapi/ml/info/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package info @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/info/MlInfoResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/info/MlInfoResponse.ts#L22-L29 type Response struct { Defaults types.Defaults `json:"defaults"` Limits types.Limits `json:"limits"` diff --git a/typedapi/ml/openjob/open_job.go b/typedapi/ml/openjob/open_job.go index 259b2917df..d5129f6aa4 100644 --- a/typedapi/ml/openjob/open_job.go +++ b/typedapi/ml/openjob/open_job.go @@ -16,12 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Open anomaly detection jobs. -// An anomaly detection job must be opened in order for it to be ready to -// receive and analyze data. It can be opened and closed multiple times -// throughout its lifecycle. 
+// +// An anomaly detection job must be opened to be ready to receive and analyze +// data. It can be opened and closed multiple times throughout its lifecycle. // When you open a new job, it starts with an empty model. // When you open an existing job, the most recent model state is automatically // loaded. The job is ready to resume its analysis from where it left off, once @@ -89,9 +89,9 @@ func NewOpenJobFunc(tp elastictransport.Interface) NewOpenJob { } // Open anomaly detection jobs. -// An anomaly detection job must be opened in order for it to be ready to -// receive and analyze data. It can be opened and closed multiple times -// throughout its lifecycle. +// +// An anomaly detection job must be opened to be ready to receive and analyze +// data. It can be opened and closed multiple times throughout its lifecycle. // When you open a new job, it starts with an empty model. // When you open an existing job, the most recent model state is automatically // loaded. The job is ready to resume its analysis from where it left off, once @@ -105,8 +105,6 @@ func New(tp elastictransport.Interface) *OpenJob { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -375,10 +373,15 @@ func (r *OpenJob) Pretty(pretty bool) *OpenJob { return r } -// Timeout Refer to the description for the `timeout` query parameter. +// Refer to the description for the `timeout` query parameter. 
// API name: timeout -func (r *OpenJob) Timeout(duration types.Duration) *OpenJob { - r.req.Timeout = duration +func (r *OpenJob) Timeout(duration types.DurationVariant) *OpenJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/ml/openjob/request.go b/typedapi/ml/openjob/request.go index 4268a11a2f..2220a06f38 100644 --- a/typedapi/ml/openjob/request.go +++ b/typedapi/ml/openjob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package openjob @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package openjob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/open_job/MlOpenJobRequest.ts#L24-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/open_job/MlOpenJobRequest.ts#L24-L67 type Request struct { // Timeout Refer to the description for the `timeout` query parameter. diff --git a/typedapi/ml/openjob/response.go b/typedapi/ml/openjob/response.go index e34bef89ec..76494ab620 100644 --- a/typedapi/ml/openjob/response.go +++ b/typedapi/ml/openjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package openjob // Response holds the response body struct for the package openjob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/open_job/MlOpenJobResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/open_job/MlOpenJobResponse.ts#L22-L31 type Response struct { // Node The ID of the node that the job was started on. In serverless this will be diff --git a/typedapi/ml/postcalendarevents/post_calendar_events.go b/typedapi/ml/postcalendarevents/post_calendar_events.go index 1eafb0f8f6..9dc5c3aa31 100644 --- a/typedapi/ml/postcalendarevents/post_calendar_events.go +++ b/typedapi/ml/postcalendarevents/post_calendar_events.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Adds scheduled events to a calendar. +// Add scheduled events to the calendar. package postcalendarevents import ( @@ -81,7 +81,7 @@ func NewPostCalendarEventsFunc(tp elastictransport.Interface) NewPostCalendarEve } } -// Adds scheduled events to a calendar. +// Add scheduled events to the calendar. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-calendar-event.html func New(tp elastictransport.Interface) *PostCalendarEvents { @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *PostCalendarEvents { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -361,12 +359,19 @@ func (r *PostCalendarEvents) Pretty(pretty bool) *PostCalendarEvents { return r } -// Events A list of one of more scheduled events. The event’s start and end times can +// A list of one of more scheduled events. The event’s start and end times can // be specified as integer milliseconds since the epoch or as a string in ISO // 8601 format. // API name: events -func (r *PostCalendarEvents) Events(events ...types.CalendarEvent) *PostCalendarEvents { - r.req.Events = events +func (r *PostCalendarEvents) Events(events ...types.CalendarEventVariant) *PostCalendarEvents { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range events { + r.req.Events = append(r.req.Events, *v.CalendarEventCaster()) + + } return r } diff --git a/typedapi/ml/postcalendarevents/request.go b/typedapi/ml/postcalendarevents/request.go index 6a6b697cd6..9c0547a085 100644 --- a/typedapi/ml/postcalendarevents/request.go +++ b/typedapi/ml/postcalendarevents/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package postcalendarevents @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package postcalendarevents // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/post_calendar_events/MlPostCalendarEventsRequest.ts#L24-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/post_calendar_events/MlPostCalendarEventsRequest.ts#L24-L48 type Request struct { // Events A list of one of more scheduled events. The event’s start and end times can diff --git a/typedapi/ml/postcalendarevents/response.go b/typedapi/ml/postcalendarevents/response.go index 0985aa767a..8c8ad1a4c3 100644 --- a/typedapi/ml/postcalendarevents/response.go +++ b/typedapi/ml/postcalendarevents/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package postcalendarevents @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package postcalendarevents // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/post_calendar_events/MlPostCalendarEventsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/post_calendar_events/MlPostCalendarEventsResponse.ts#L22-L24 type Response struct { Events []types.CalendarEvent `json:"events"` } diff --git a/typedapi/ml/postdata/post_data.go b/typedapi/ml/postdata/post_data.go index b8c863720d..fa035b3f4f 100644 --- a/typedapi/ml/postdata/post_data.go +++ b/typedapi/ml/postdata/post_data.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Sends data to an anomaly detection job for analysis. +// Send data to an anomaly detection job for analysis. // // IMPORTANT: For each job, data can be accepted from only a single connection // at a time. @@ -86,7 +86,7 @@ func NewPostDataFunc(tp elastictransport.Interface) NewPostData { } } -// Sends data to an anomaly detection job for analysis. +// Send data to an anomaly detection job for analysis. // // IMPORTANT: For each job, data can be accepted from only a single connection // at a time. 
@@ -101,8 +101,6 @@ func New(tp elastictransport.Interface) *PostData { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { diff --git a/typedapi/ml/postdata/request.go b/typedapi/ml/postdata/request.go index de81356792..c95b67fe79 100644 --- a/typedapi/ml/postdata/request.go +++ b/typedapi/ml/postdata/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package postdata @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package postdata // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/post_data/MlPostJobDataRequest.ts#L24-L68 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/post_data/MlPostJobDataRequest.ts#L24-L76 type Request = []json.RawMessage // NewRequest returns a Request diff --git a/typedapi/ml/postdata/response.go b/typedapi/ml/postdata/response.go index 65dad48f89..c09002c9c5 100644 --- a/typedapi/ml/postdata/response.go +++ b/typedapi/ml/postdata/response.go @@ -16,29 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package postdata // Response holds the response body struct for the package postdata // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/post_data/MlPostJobDataResponse.ts#L23-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/post_data/MlPostJobDataResponse.ts#L24-L45 type Response struct { - BucketCount int64 `json:"bucket_count"` - EarliestRecordTimestamp int64 `json:"earliest_record_timestamp"` - EmptyBucketCount int64 `json:"empty_bucket_count"` - InputBytes int64 `json:"input_bytes"` - InputFieldCount int64 `json:"input_field_count"` - InputRecordCount int64 `json:"input_record_count"` - InvalidDateCount int64 `json:"invalid_date_count"` - JobId string `json:"job_id"` - LastDataTime int `json:"last_data_time"` - LatestRecordTimestamp int64 `json:"latest_record_timestamp"` - MissingFieldCount int64 `json:"missing_field_count"` - OutOfOrderTimestampCount int64 `json:"out_of_order_timestamp_count"` - ProcessedFieldCount int64 `json:"processed_field_count"` - ProcessedRecordCount int64 `json:"processed_record_count"` - SparseBucketCount int64 `json:"sparse_bucket_count"` + BucketCount int64 `json:"bucket_count"` + EarliestRecordTimestamp *int64 `json:"earliest_record_timestamp,omitempty"` + EmptyBucketCount int64 `json:"empty_bucket_count"` + InputBytes int64 `json:"input_bytes"` + InputFieldCount int64 `json:"input_field_count"` + InputRecordCount int64 `json:"input_record_count"` + InvalidDateCount int64 `json:"invalid_date_count"` + JobId string `json:"job_id"` + LastDataTime *int64 `json:"last_data_time,omitempty"` + LatestEmptyBucketTimestamp *int64 `json:"latest_empty_bucket_timestamp,omitempty"` + 
LatestRecordTimestamp *int64 `json:"latest_record_timestamp,omitempty"` + LatestSparseBucketTimestamp *int64 `json:"latest_sparse_bucket_timestamp,omitempty"` + LogTime *int64 `json:"log_time,omitempty"` + MissingFieldCount int64 `json:"missing_field_count"` + OutOfOrderTimestampCount int64 `json:"out_of_order_timestamp_count"` + ProcessedFieldCount int64 `json:"processed_field_count"` + ProcessedRecordCount int64 `json:"processed_record_count"` + SparseBucketCount int64 `json:"sparse_bucket_count"` } // NewResponse returns a Response diff --git a/typedapi/ml/previewdatafeed/preview_datafeed.go b/typedapi/ml/previewdatafeed/preview_datafeed.go index 932f4b92e6..6516a01acd 100644 --- a/typedapi/ml/previewdatafeed/preview_datafeed.go +++ b/typedapi/ml/previewdatafeed/preview_datafeed.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Previews a datafeed. +// Preview a datafeed. // This API returns the first "page" of search results from a datafeed. // You can preview an existing datafeed or provide configuration details for a // datafeed @@ -92,7 +92,7 @@ func NewPreviewDatafeedFunc(tp elastictransport.Interface) NewPreviewDatafeed { } } -// Previews a datafeed. +// Preview a datafeed. // This API returns the first "page" of search results from a datafeed. 
// You can preview an existing datafeed or provide configuration details for a // datafeed @@ -115,8 +115,6 @@ func New(tp elastictransport.Interface) *PreviewDatafeed { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -416,16 +414,20 @@ func (r *PreviewDatafeed) Pretty(pretty bool) *PreviewDatafeed { return r } -// DatafeedConfig The datafeed definition to preview. +// The datafeed definition to preview. // API name: datafeed_config -func (r *PreviewDatafeed) DatafeedConfig(datafeedconfig *types.DatafeedConfig) *PreviewDatafeed { +func (r *PreviewDatafeed) DatafeedConfig(datafeedconfig types.DatafeedConfigVariant) *PreviewDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DatafeedConfig = datafeedconfig + r.req.DatafeedConfig = datafeedconfig.DatafeedConfigCaster() return r } -// JobConfig The configuration details for the anomaly detection job that is associated +// The configuration details for the anomaly detection job that is associated // with the datafeed. If the // `datafeed_config` object does not include a `job_id` that references an // existing anomaly detection job, you must @@ -434,9 +436,13 @@ func (r *PreviewDatafeed) DatafeedConfig(datafeedconfig *types.DatafeedConfig) * // used. You cannot specify a `job_config` object unless you also supply a // `datafeed_config` object. 
// API name: job_config -func (r *PreviewDatafeed) JobConfig(jobconfig *types.JobConfig) *PreviewDatafeed { +func (r *PreviewDatafeed) JobConfig(jobconfig types.JobConfigVariant) *PreviewDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.JobConfig = jobconfig + r.req.JobConfig = jobconfig.JobConfigCaster() return r } diff --git a/typedapi/ml/previewdatafeed/request.go b/typedapi/ml/previewdatafeed/request.go index 4dfe22b61b..b1f71c5aab 100644 --- a/typedapi/ml/previewdatafeed/request.go +++ b/typedapi/ml/previewdatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package previewdatafeed @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package previewdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/preview_datafeed/MlPreviewDatafeedRequest.ts#L26-L69 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/preview_datafeed/MlPreviewDatafeedRequest.ts#L26-L81 type Request struct { // DatafeedConfig The datafeed definition to preview. diff --git a/typedapi/ml/previewdatafeed/response.go b/typedapi/ml/previewdatafeed/response.go index cbef6ade46..185bd56a5d 100644 --- a/typedapi/ml/previewdatafeed/response.go +++ b/typedapi/ml/previewdatafeed/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package previewdatafeed @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package previewdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/preview_datafeed/MlPreviewDatafeedResponse.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/preview_datafeed/MlPreviewDatafeedResponse.ts#L20-L23 type Response []json.RawMessage diff --git a/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go b/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go index 7dd90baf7b..3922fab89e 100644 --- a/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go +++ b/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Previews the extracted features used by a data frame analytics config. +// Preview features used by data frame analytics. +// Preview the extracted features used by a data frame analytics config. package previewdataframeanalytics import ( @@ -79,9 +80,10 @@ func NewPreviewDataFrameAnalyticsFunc(tp elastictransport.Interface) NewPreviewD } } -// Previews the extracted features used by a data frame analytics config. +// Preview features used by data frame analytics. +// Preview the extracted features used by a data frame analytics config. 
// -// http://www.elastic.co/guide/en/elasticsearch/reference/current/preview-dfanalytics.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-dfanalytics.html func New(tp elastictransport.Interface) *PreviewDataFrameAnalytics { r := &PreviewDataFrameAnalytics{ transport: tp, @@ -89,8 +91,6 @@ func New(tp elastictransport.Interface) *PreviewDataFrameAnalytics { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -372,13 +372,17 @@ func (r *PreviewDataFrameAnalytics) Pretty(pretty bool) *PreviewDataFrameAnalyti return r } -// Config A data frame analytics config as described in create data frame analytics +// A data frame analytics config as described in create data frame analytics // jobs. Note that `id` and `dest` don’t need to be provided in the context of // this API. // API name: config -func (r *PreviewDataFrameAnalytics) Config(config *types.DataframePreviewConfig) *PreviewDataFrameAnalytics { +func (r *PreviewDataFrameAnalytics) Config(config types.DataframePreviewConfigVariant) *PreviewDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Config = config + r.req.Config = config.DataframePreviewConfigCaster() return r } diff --git a/typedapi/ml/previewdataframeanalytics/request.go b/typedapi/ml/previewdataframeanalytics/request.go index 2faac85d1b..6efb47650f 100644 --- a/typedapi/ml/previewdataframeanalytics/request.go +++ b/typedapi/ml/previewdataframeanalytics/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package previewdataframeanalytics @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package previewdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsRequest.ts#L24-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsRequest.ts#L24-L60 type Request struct { // Config A data frame analytics config as described in create data frame analytics diff --git a/typedapi/ml/previewdataframeanalytics/response.go b/typedapi/ml/previewdataframeanalytics/response.go index c1ed86cbad..1e034746da 100644 --- a/typedapi/ml/previewdataframeanalytics/response.go +++ b/typedapi/ml/previewdataframeanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package previewdataframeanalytics // Response holds the response body struct for the package previewdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsResponse.ts#L23-L28 type Response struct { // FeatureValues An array of objects that contain feature name and value pairs. The features diff --git a/typedapi/ml/putcalendar/put_calendar.go b/typedapi/ml/putcalendar/put_calendar.go index d5128e2340..37a4cd82ef 100644 --- a/typedapi/ml/putcalendar/put_calendar.go +++ b/typedapi/ml/putcalendar/put_calendar.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a calendar. +// Create a calendar. package putcalendar import ( @@ -81,7 +81,7 @@ func NewPutCalendarFunc(tp elastictransport.Interface) NewPutCalendar { } } -// Creates a calendar. +// Create a calendar. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar.html func New(tp elastictransport.Interface) *PutCalendar { @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *PutCalendar { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -359,19 +357,30 @@ func (r *PutCalendar) Pretty(pretty bool) *PutCalendar { return r } -// Description A description of the calendar. +// A description of the calendar. // API name: description func (r *PutCalendar) Description(description string) *PutCalendar { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// JobIds An array of anomaly detection job identifiers. +// An array of anomaly detection job identifiers. // API name: job_ids func (r *PutCalendar) JobIds(jobids ...string) *PutCalendar { - r.req.JobIds = jobids + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range jobids { + r.req.JobIds = append(r.req.JobIds, v) + + } return r } diff --git a/typedapi/ml/putcalendar/request.go b/typedapi/ml/putcalendar/request.go index 78d55e6c56..910c9ae388 100644 --- a/typedapi/ml/putcalendar/request.go +++ b/typedapi/ml/putcalendar/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putcalendar @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package putcalendar // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_calendar/MlPutCalendarRequest.ts#L23-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_calendar/MlPutCalendarRequest.ts#L23-L51 type Request struct { // Description A description of the calendar. diff --git a/typedapi/ml/putcalendar/response.go b/typedapi/ml/putcalendar/response.go index 21c3266414..e92b53a99a 100644 --- a/typedapi/ml/putcalendar/response.go +++ b/typedapi/ml/putcalendar/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putcalendar @@ -31,7 +31,7 @@ import ( // Response holds the response body struct for the package putcalendar // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_calendar/MlPutCalendarResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_calendar/MlPutCalendarResponse.ts#L22-L31 type Response struct { // CalendarId A string that uniquely identifies a calendar. 
diff --git a/typedapi/ml/putcalendarjob/put_calendar_job.go b/typedapi/ml/putcalendarjob/put_calendar_job.go index 57102e7533..c143f8c68e 100644 --- a/typedapi/ml/putcalendarjob/put_calendar_job.go +++ b/typedapi/ml/putcalendarjob/put_calendar_job.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Adds an anomaly detection job to a calendar. +// Add anomaly detection job to calendar. package putcalendarjob import ( @@ -81,7 +81,7 @@ func NewPutCalendarJobFunc(tp elastictransport.Interface) NewPutCalendarJob { } } -// Adds an anomaly detection job to a calendar. +// Add anomaly detection job to calendar. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar-job.html func New(tp elastictransport.Interface) *PutCalendarJob { diff --git a/typedapi/ml/putcalendarjob/response.go b/typedapi/ml/putcalendarjob/response.go index 5940c9660e..e86d0034b0 100644 --- a/typedapi/ml/putcalendarjob/response.go +++ b/typedapi/ml/putcalendarjob/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putcalendarjob @@ -31,7 +31,7 @@ import ( // Response holds the response body struct for the package putcalendarjob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_calendar_job/MlPutCalendarJobResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_calendar_job/MlPutCalendarJobResponse.ts#L22-L31 type Response struct { // CalendarId A string that uniquely identifies a calendar. diff --git a/typedapi/ml/putdatafeed/put_datafeed.go b/typedapi/ml/putdatafeed/put_datafeed.go index 62a148f47f..311950a47e 100644 --- a/typedapi/ml/putdatafeed/put_datafeed.go +++ b/typedapi/ml/putdatafeed/put_datafeed.go @@ -16,15 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Instantiates a datafeed. +// Create a datafeed. // Datafeeds retrieve data from Elasticsearch for analysis by an anomaly // detection job. // You can associate only one datafeed with each anomaly detection job. // The datafeed contains a query that runs at a defined interval (`frequency`). // If you are concerned about delayed data, you can add a delay (`query_delay') // at each interval. +// By default, the datafeed uses the following query: `{"match_all": {"boost": +// 1}}`. +// // When Elasticsearch security features are enabled, your datafeed remembers // which roles the user who created it had // at the time of creation and runs the query using those same roles. 
If you @@ -97,13 +100,16 @@ func NewPutDatafeedFunc(tp elastictransport.Interface) NewPutDatafeed { } } -// Instantiates a datafeed. +// Create a datafeed. // Datafeeds retrieve data from Elasticsearch for analysis by an anomaly // detection job. // You can associate only one datafeed with each anomaly detection job. // The datafeed contains a query that runs at a defined interval (`frequency`). // If you are concerned about delayed data, you can add a delay (`query_delay') // at each interval. +// By default, the datafeed uses the following query: `{"match_all": {"boost": +// 1}}`. +// // When Elasticsearch security features are enabled, your datafeed remembers // which roles the user who created it had // at the time of creation and runs the query using those same roles. If you @@ -122,8 +128,6 @@ func New(tp elastictransport.Interface) *PutDatafeed { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -434,18 +438,39 @@ func (r *PutDatafeed) Pretty(pretty bool) *PutDatafeed { return r } -// Aggregations If set, the datafeed performs aggregation searches. +// If set, the datafeed performs aggregation searches. // Support for aggregations is limited and should be used only with low // cardinality data. 
// API name: aggregations func (r *PutDatafeed) Aggregations(aggregations map[string]types.Aggregations) *PutDatafeed { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} + +func (r *PutDatafeed) AddAggregation(key string, value types.AggregationsVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + r.req.Aggregations = tmp return r } -// ChunkingConfig Datafeeds might be required to search over long time periods, for several +// Datafeeds might be required to search over long time periods, for several // months or years. // This search is split into time chunks in order to ensure the load on // Elasticsearch is managed. @@ -453,14 +478,18 @@ func (r *PutDatafeed) Aggregations(aggregations map[string]types.Aggregations) * // calculated; // it is an advanced configuration option. // API name: chunking_config -func (r *PutDatafeed) ChunkingConfig(chunkingconfig *types.ChunkingConfig) *PutDatafeed { +func (r *PutDatafeed) ChunkingConfig(chunkingconfig types.ChunkingConfigVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ChunkingConfig = chunkingconfig + r.req.ChunkingConfig = chunkingconfig.ChunkingConfigCaster() return r } -// DelayedDataCheckConfig Specifies whether the datafeed checks for missing data and the size of the +// Specifies whether the datafeed checks for missing data and the size of the // window. 
// The datafeed can optionally search over indices that have already been read // in an effort to determine whether @@ -470,14 +499,18 @@ func (r *PutDatafeed) ChunkingConfig(chunkingconfig *types.ChunkingConfig) *PutD // has passed that moment in time. // This check runs only on real-time datafeeds. // API name: delayed_data_check_config -func (r *PutDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig *types.DelayedDataCheckConfig) *PutDatafeed { +func (r *PutDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig types.DelayedDataCheckConfigVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DelayedDataCheckConfig = delayeddatacheckconfig + r.req.DelayedDataCheckConfig = delayeddatacheckconfig.DelayedDataCheckConfigCaster() return r } -// Frequency The interval at which scheduled queries are made while the datafeed runs in +// The interval at which scheduled queries are made while the datafeed runs in // real time. // The default value is either the bucket span for short bucket spans, or, for // longer bucket spans, a sensible @@ -488,47 +521,71 @@ func (r *PutDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig *types.Delay // aggregations, this value must be divisible by the interval of the date // histogram aggregation. 
// API name: frequency -func (r *PutDatafeed) Frequency(duration types.Duration) *PutDatafeed { - r.req.Frequency = duration +func (r *PutDatafeed) Frequency(duration types.DurationVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() return r } // API name: headers -func (r *PutDatafeed) Headers(httpheaders types.HttpHeaders) *PutDatafeed { - r.req.Headers = httpheaders +func (r *PutDatafeed) Headers(httpheaders types.HttpHeadersVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Headers = *httpheaders.HttpHeadersCaster() return r } -// Indices An array of index names. Wildcards are supported. If any of the indices are +// An array of index names. Wildcards are supported. If any of the indices are // in remote clusters, the machine // learning nodes must have the `remote_cluster_client` role. // API name: indices func (r *PutDatafeed) Indices(indices ...string) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Indices = indices return r } -// IndicesOptions Specifies index expansion options that are used during search +// Specifies index expansion options that are used during search // API name: indices_options -func (r *PutDatafeed) IndicesOptions(indicesoptions *types.IndicesOptions) *PutDatafeed { +func (r *PutDatafeed) IndicesOptions(indicesoptions types.IndicesOptionsVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndicesOptions = indicesoptions + r.req.IndicesOptions = indicesoptions.IndicesOptionsCaster() return r } -// JobId Identifier for the anomaly detection job. +// Identifier for the anomaly detection job. 
// API name: job_id func (r *PutDatafeed) JobId(id string) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.JobId = &id return r } -// MaxEmptySearches If a real-time datafeed has never seen any data (including during any initial +// If a real-time datafeed has never seen any data (including during any initial // training period), it automatically // stops and closes the associated job after this many real-time searches return // no documents. In other words, @@ -538,25 +595,34 @@ func (r *PutDatafeed) JobId(id string) *PutDatafeed { // default, it is not set. // API name: max_empty_searches func (r *PutDatafeed) MaxEmptySearches(maxemptysearches int) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxEmptySearches = &maxemptysearches return r } -// Query The Elasticsearch query domain-specific language (DSL). This value +// The Elasticsearch query domain-specific language (DSL). This value // corresponds to the query object in an // Elasticsearch search POST body. All the options that are supported by // Elasticsearch can be used, as this // object is passed verbatim to Elasticsearch. // API name: query -func (r *PutDatafeed) Query(query *types.Query) *PutDatafeed { +func (r *PutDatafeed) Query(query types.QueryVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// QueryDelay The number of seconds behind real time that data is queried. For example, if +// The number of seconds behind real time that data is queried. For example, if // data from 10:04 a.m. might // not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 // seconds. 
The default @@ -564,38 +630,74 @@ func (r *PutDatafeed) Query(query *types.Query) *PutDatafeed { // the query performance // when there are multiple jobs running on the same node. // API name: query_delay -func (r *PutDatafeed) QueryDelay(duration types.Duration) *PutDatafeed { - r.req.QueryDelay = duration +func (r *PutDatafeed) QueryDelay(duration types.DurationVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.QueryDelay = *duration.DurationCaster() return r } -// RuntimeMappings Specifies runtime fields for the datafeed search. +// Specifies runtime fields for the datafeed search. // API name: runtime_mappings -func (r *PutDatafeed) RuntimeMappings(runtimefields types.RuntimeFields) *PutDatafeed { - r.req.RuntimeMappings = runtimefields +func (r *PutDatafeed) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// ScriptFields Specifies scripts that evaluate custom expressions and returns script fields +// Specifies scripts that evaluate custom expressions and returns script fields // to the datafeed. // The detector configuration objects in a job can contain functions that use // these script fields. 
// API name: script_fields func (r *PutDatafeed) ScriptFields(scriptfields map[string]types.ScriptField) *PutDatafeed { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ScriptFields = scriptfields + return r +} + +func (r *PutDatafeed) AddScriptField(key string, value types.ScriptFieldVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ScriptField + if r.req.ScriptFields == nil { + r.req.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = r.req.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + r.req.ScriptFields = tmp return r } -// ScrollSize The size parameter that is used in Elasticsearch searches when the datafeed +// The size parameter that is used in Elasticsearch searches when the datafeed // does not use aggregations. // The maximum value is the value of `index.max_result_window`, which is 10,000 // by default. // API name: scroll_size func (r *PutDatafeed) ScrollSize(scrollsize int) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ScrollSize = &scrollsize return r diff --git a/typedapi/ml/putdatafeed/request.go b/typedapi/ml/putdatafeed/request.go index 2f1dce132f..43344504f4 100644 --- a/typedapi/ml/putdatafeed/request.go +++ b/typedapi/ml/putdatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putdatafeed @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_datafeed/MlPutDatafeedRequest.ts#L37-L172 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_datafeed/MlPutDatafeedRequest.ts#L37-L184 type Request struct { // Aggregations If set, the datafeed performs aggregation searches. @@ -151,7 +151,7 @@ func (s *Request) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": + case "aggregations", "aggs": if s.Aggregations == nil { s.Aggregations = make(map[string]types.Aggregations, 0) } diff --git a/typedapi/ml/putdatafeed/response.go b/typedapi/ml/putdatafeed/response.go index 214039fb7d..d6203de008 100644 --- a/typedapi/ml/putdatafeed/response.go +++ b/typedapi/ml/putdatafeed/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putdatafeed @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_datafeed/MlPutDatafeedResponse.ts#L31-L49 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_datafeed/MlPutDatafeedResponse.ts#L31-L49 type Response struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` Authorization *types.DatafeedAuthorization `json:"authorization,omitempty"` diff --git a/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go b/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go index 9315a62e70..962ae61da6 100644 --- a/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go +++ b/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go @@ -16,11 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Instantiates a data frame analytics job. +// Create a data frame analytics job. // This API creates a data frame analytics job that performs an analysis on the // source indices and stores the outcome in a destination index. +// By default, the query used in the source configuration is `{"match_all": +// {}}`. +// +// If the destination index does not exist, it is created automatically when you +// start the job. 
+// +// If you supply only a subset of the regression or classification parameters, +// hyperparameter optimization occurs. It determines a value for each of the +// undefined parameters. package putdataframeanalytics import ( @@ -83,9 +92,18 @@ func NewPutDataFrameAnalyticsFunc(tp elastictransport.Interface) NewPutDataFrame } } -// Instantiates a data frame analytics job. +// Create a data frame analytics job. // This API creates a data frame analytics job that performs an analysis on the // source indices and stores the outcome in a destination index. +// By default, the query used in the source configuration is `{"match_all": +// {}}`. +// +// If the destination index does not exist, it is created automatically when you +// start the job. +// +// If you supply only a subset of the regression or classification parameters, +// hyperparameter optimization occurs. It determines a value for each of the +// undefined parameters. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-dfanalytics.html func New(tp elastictransport.Interface) *PutDataFrameAnalytics { @@ -95,8 +113,6 @@ func New(tp elastictransport.Interface) *PutDataFrameAnalytics { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -367,7 +383,7 @@ func (r *PutDataFrameAnalytics) Pretty(pretty bool) *PutDataFrameAnalytics { return r } -// AllowLazyStart Specifies whether this job can start when there is insufficient machine +// Specifies whether this job can start when there is insufficient machine // learning node capacity for it to be immediately assigned to a node. If // set to `false` and a machine learning node with capacity to run the job // cannot be immediately found, the API returns an error. If set to `true`, @@ -377,23 +393,32 @@ func (r *PutDataFrameAnalytics) Pretty(pretty bool) *PutDataFrameAnalytics { // `xpack.ml.max_lazy_ml_nodes` setting. 
// API name: allow_lazy_start func (r *PutDataFrameAnalytics) AllowLazyStart(allowlazystart bool) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowLazyStart = &allowlazystart return r } -// Analysis The analysis configuration, which contains the information necessary to +// The analysis configuration, which contains the information necessary to // perform one of the following types of analysis: classification, outlier // detection, or regression. // API name: analysis -func (r *PutDataFrameAnalytics) Analysis(analysis *types.DataframeAnalysisContainer) *PutDataFrameAnalytics { +func (r *PutDataFrameAnalytics) Analysis(analysis types.DataframeAnalysisContainerVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Analysis = *analysis + r.req.Analysis = *analysis.DataframeAnalysisContainerCaster() return r } -// AnalyzedFields Specifies `includes` and/or `excludes` patterns to select which fields +// Specifies `includes` and/or `excludes` patterns to select which fields // will be included in the analysis. The patterns specified in `excludes` // are applied last, therefore `excludes` takes precedence. In other words, // if the same field is specified in both `includes` and `excludes`, then @@ -422,73 +447,120 @@ func (r *PutDataFrameAnalytics) Analysis(analysis *types.DataframeAnalysisContai // values to a single number. For example, in case of age ranges, you can // model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. 
// API name: analyzed_fields -func (r *PutDataFrameAnalytics) AnalyzedFields(analyzedfields *types.DataframeAnalysisAnalyzedFields) *PutDataFrameAnalytics { +func (r *PutDataFrameAnalytics) AnalyzedFields(analyzedfields types.DataframeAnalysisAnalyzedFieldsVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalyzedFields = analyzedfields + r.req.AnalyzedFields = analyzedfields.DataframeAnalysisAnalyzedFieldsCaster() return r } -// Description A description of the job. +// A description of the job. // API name: description func (r *PutDataFrameAnalytics) Description(description string) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Dest The destination configuration. +// The destination configuration. // API name: dest -func (r *PutDataFrameAnalytics) Dest(dest *types.DataframeAnalyticsDestination) *PutDataFrameAnalytics { +func (r *PutDataFrameAnalytics) Dest(dest types.DataframeAnalyticsDestinationVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Dest = *dest + r.req.Dest = *dest.DataframeAnalyticsDestinationCaster() return r } // API name: headers -func (r *PutDataFrameAnalytics) Headers(httpheaders types.HttpHeaders) *PutDataFrameAnalytics { - r.req.Headers = httpheaders +func (r *PutDataFrameAnalytics) Headers(httpheaders types.HttpHeadersVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Headers = *httpheaders.HttpHeadersCaster() return r } -// MaxNumThreads The maximum number of threads to be used by the analysis. Using more +// The maximum number of threads to be used by the analysis. 
Using more // threads may decrease the time necessary to complete the analysis at the // cost of using more CPU. Note that the process may use additional threads // for operational functionality other than the analysis itself. // API name: max_num_threads func (r *PutDataFrameAnalytics) MaxNumThreads(maxnumthreads int) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxNumThreads = &maxnumthreads return r } -// ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for +// API name: _meta +func (r *PutDataFrameAnalytics) Meta_(metadata types.MetadataVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() + + return r +} + +// The approximate maximum amount of memory resources that are permitted for // analytical processing. If your `elasticsearch.yml` file contains an // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try // to create data frame analytics jobs that have `model_memory_limit` values // greater than that setting. // API name: model_memory_limit func (r *PutDataFrameAnalytics) ModelMemoryLimit(modelmemorylimit string) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelMemoryLimit = &modelmemorylimit return r } -// Source The configuration of how to source the analysis data. +// The configuration of how to source the analysis data. 
// API name: source -func (r *PutDataFrameAnalytics) Source(source *types.DataframeAnalyticsSource) *PutDataFrameAnalytics { +func (r *PutDataFrameAnalytics) Source(source types.DataframeAnalyticsSourceVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source = *source + r.req.Source = *source.DataframeAnalyticsSourceCaster() return r } // API name: version func (r *PutDataFrameAnalytics) Version(versionstring string) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Version = &versionstring return r diff --git a/typedapi/ml/putdataframeanalytics/request.go b/typedapi/ml/putdataframeanalytics/request.go index d9e23d5bf1..b3a55ba019 100644 --- a/typedapi/ml/putdataframeanalytics/request.go +++ b/typedapi/ml/putdataframeanalytics/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putdataframeanalytics @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsRequest.ts#L30-L141 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsRequest.ts#L30-L155 type Request struct { // AllowLazyStart Specifies whether this job can start when there is insufficient machine @@ -87,7 +87,8 @@ type Request struct { // threads may decrease the time necessary to complete the analysis at the // cost of using more CPU. Note that the process may use additional threads // for operational functionality other than the analysis itself. - MaxNumThreads *int `json:"max_num_threads,omitempty"` + MaxNumThreads *int `json:"max_num_threads,omitempty"` + Meta_ types.Metadata `json:"_meta,omitempty"` // ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for // analytical processing. 
If your `elasticsearch.yml` file contains an // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try @@ -194,6 +195,11 @@ func (s *Request) UnmarshalJSON(data []byte) error { s.MaxNumThreads = &f } + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + case "model_memory_limit": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { diff --git a/typedapi/ml/putdataframeanalytics/response.go b/typedapi/ml/putdataframeanalytics/response.go index 12dd2b43a2..d7468c29ba 100644 --- a/typedapi/ml/putdataframeanalytics/response.go +++ b/typedapi/ml/putdataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putdataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsResponse.ts#L31-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsResponse.ts#L31-L47 type Response struct { AllowLazyStart bool `json:"allow_lazy_start"` Analysis types.DataframeAnalysisContainer `json:"analysis"` @@ -37,6 +37,7 @@ type Response struct { Dest types.DataframeAnalyticsDestination `json:"dest"` Id string `json:"id"` MaxNumThreads int `json:"max_num_threads"` + Meta_ types.Metadata `json:"_meta,omitempty"` ModelMemoryLimit string `json:"model_memory_limit"` Source types.DataframeAnalyticsSource `json:"source"` Version string `json:"version"` diff --git 
a/typedapi/ml/putfilter/put_filter.go b/typedapi/ml/putfilter/put_filter.go index 64808ec902..7521588172 100644 --- a/typedapi/ml/putfilter/put_filter.go +++ b/typedapi/ml/putfilter/put_filter.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Instantiates a filter. +// Create a filter. // A filter contains a list of strings. It can be used by one or more anomaly // detection jobs. // Specifically, filters are referenced in the `custom_rules` property of @@ -85,7 +85,7 @@ func NewPutFilterFunc(tp elastictransport.Interface) NewPutFilter { } } -// Instantiates a filter. +// Create a filter. // A filter contains a list of strings. It can be used by one or more anomaly // detection jobs. // Specifically, filters are referenced in the `custom_rules` property of @@ -99,8 +99,6 @@ func New(tp elastictransport.Interface) *PutFilter { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -367,21 +365,32 @@ func (r *PutFilter) Pretty(pretty bool) *PutFilter { return r } -// Description A description of the filter. +// A description of the filter. // API name: description func (r *PutFilter) Description(description string) *PutFilter { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Items The items of the filter. A wildcard `*` can be used at the beginning or the +// The items of the filter. A wildcard `*` can be used at the beginning or the // end of an item. // Up to 10000 items are allowed in each filter. 
// API name: items func (r *PutFilter) Items(items ...string) *PutFilter { - r.req.Items = items + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range items { + r.req.Items = append(r.req.Items, v) + + } return r } diff --git a/typedapi/ml/putfilter/request.go b/typedapi/ml/putfilter/request.go index 2a397799c2..908a727519 100644 --- a/typedapi/ml/putfilter/request.go +++ b/typedapi/ml/putfilter/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putfilter @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package putfilter // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_filter/MlPutFilterRequest.ts#L23-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_filter/MlPutFilterRequest.ts#L23-L58 type Request struct { // Description A description of the filter. diff --git a/typedapi/ml/putfilter/response.go b/typedapi/ml/putfilter/response.go index 7bc62d5ae4..3660d1772d 100644 --- a/typedapi/ml/putfilter/response.go +++ b/typedapi/ml/putfilter/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putfilter // Response holds the response body struct for the package putfilter // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_filter/MlPutFilterResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_filter/MlPutFilterResponse.ts#L22-L28 type Response struct { Description string `json:"description"` FilterId string `json:"filter_id"` diff --git a/typedapi/ml/putjob/put_job.go b/typedapi/ml/putjob/put_job.go index 3af3b9c745..17affa1641 100644 --- a/typedapi/ml/putjob/put_job.go +++ b/typedapi/ml/putjob/put_job.go @@ -16,11 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Create an anomaly detection job. +// // If you include a `datafeed_config`, you must have read index privileges on // the source index. +// If you include a `datafeed_config` but do not provide a query, the datafeed +// uses `{"match_all": {"boost": 1}}`. package putjob import ( @@ -37,6 +40,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -84,8 +88,11 @@ func NewPutJobFunc(tp elastictransport.Interface) NewPutJob { } // Create an anomaly detection job. +// // If you include a `datafeed_config`, you must have read index privileges on // the source index. 
+// If you include a `datafeed_config` but do not provide a query, the datafeed +// uses `{"match_all": {"boost": 1}}`. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html func New(tp elastictransport.Interface) *PutJob { @@ -95,8 +102,6 @@ func New(tp elastictransport.Interface) *PutJob { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -321,6 +326,56 @@ func (r *PutJob) _jobid(jobid string) *PutJob { return r } +// AllowNoIndices If `true`, wildcard indices expressions that resolve into no concrete indices +// are ignored. This includes the +// `_all` string or when no indices are specified. +// API name: allow_no_indices +func (r *PutJob) AllowNoIndices(allownoindices bool) *PutJob { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. If the request can target +// data streams, this argument determines +// whether wildcard expressions match hidden data streams. Supports +// comma-separated values. Valid values are: +// +// * `all`: Match any data stream or index, including hidden ones. +// * `closed`: Match closed, non-hidden indices. Also matches any non-hidden +// data stream. Data streams cannot be closed. +// * `hidden`: Match hidden data streams and hidden indices. Must be combined +// with `open`, `closed`, or both. +// * `none`: Wildcard patterns are not accepted. +// * `open`: Match open, non-hidden indices. Also matches any non-hidden data +// stream. 
+// API name: expand_wildcards +func (r *PutJob) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *PutJob { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreThrottled If `true`, concrete, expanded or aliased indices are ignored when frozen. +// API name: ignore_throttled +func (r *PutJob) IgnoreThrottled(ignorethrottled bool) *PutJob { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// IgnoreUnavailable If `true`, unavailable indices (missing or closed) are ignored. +// API name: ignore_unavailable +func (r *PutJob) IgnoreUnavailable(ignoreunavailable bool) *PutJob { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -365,7 +420,7 @@ func (r *PutJob) Pretty(pretty bool) *PutJob { return r } -// AllowLazyOpen Advanced configuration option. Specifies whether this job can open when there +// Advanced configuration option. Specifies whether this job can open when there // is insufficient machine learning node capacity for it to be immediately // assigned to a node. By default, if a machine learning node with capacity to // run the job cannot immediately be found, the open anomaly detection jobs API @@ -375,110 +430,156 @@ func (r *PutJob) Pretty(pretty bool) *PutJob { // opening state until sufficient machine learning node capacity is available. // API name: allow_lazy_open func (r *PutJob) AllowLazyOpen(allowlazyopen bool) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowLazyOpen = &allowlazyopen return r } -// AnalysisConfig Specifies how to analyze the data. 
After you create a job, you cannot change +// Specifies how to analyze the data. After you create a job, you cannot change // the analysis configuration; all the properties are informational. // API name: analysis_config -func (r *PutJob) AnalysisConfig(analysisconfig *types.AnalysisConfig) *PutJob { +func (r *PutJob) AnalysisConfig(analysisconfig types.AnalysisConfigVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalysisConfig = *analysisconfig + r.req.AnalysisConfig = *analysisconfig.AnalysisConfigCaster() return r } -// AnalysisLimits Limits can be applied for the resources required to hold the mathematical +// Limits can be applied for the resources required to hold the mathematical // models in memory. These limits are approximate and can be set per job. They // do not control the memory used by other processes, for example the // Elasticsearch Java processes. // API name: analysis_limits -func (r *PutJob) AnalysisLimits(analysislimits *types.AnalysisLimits) *PutJob { +func (r *PutJob) AnalysisLimits(analysislimits types.AnalysisLimitsVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalysisLimits = analysislimits + r.req.AnalysisLimits = analysislimits.AnalysisLimitsCaster() return r } -// BackgroundPersistInterval Advanced configuration option. The time between each periodic persistence of +// Advanced configuration option. The time between each periodic persistence of // the model. The default value is a randomized value between 3 to 4 hours, // which avoids all jobs persisting at exactly the same time. The smallest // allowed value is 1 hour. For very large models (several GB), persistence // could take 10-20 minutes, so do not set the `background_persist_interval` // value too low. 
// API name: background_persist_interval -func (r *PutJob) BackgroundPersistInterval(duration types.Duration) *PutJob { - r.req.BackgroundPersistInterval = duration +func (r *PutJob) BackgroundPersistInterval(duration types.DurationVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.BackgroundPersistInterval = *duration.DurationCaster() return r } -// CustomSettings Advanced configuration option. Contains custom meta data about the job. +// Advanced configuration option. Contains custom meta data about the job. // API name: custom_settings func (r *PutJob) CustomSettings(customsettings json.RawMessage) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.CustomSettings = customsettings return r } -// DailyModelSnapshotRetentionAfterDays Advanced configuration option, which affects the automatic removal of old +// Advanced configuration option, which affects the automatic removal of old // model snapshots for this job. It specifies a period of time (in days) after // which only the first snapshot per day is retained. This period is relative to // the timestamp of the most recent snapshot for this job. Valid values range // from 0 to `model_snapshot_retention_days`. // API name: daily_model_snapshot_retention_after_days func (r *PutJob) DailyModelSnapshotRetentionAfterDays(dailymodelsnapshotretentionafterdays int64) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.DailyModelSnapshotRetentionAfterDays = &dailymodelsnapshotretentionafterdays return r } -// DataDescription Defines the format of the input data when you send data to the job by using +// Defines the format of the input data when you send data to the job by using // the post data API. Note that when configure a datafeed, these properties are // automatically set. 
When data is received via the post data API, it is not // stored in Elasticsearch. Only the results for anomaly detection are retained. // API name: data_description -func (r *PutJob) DataDescription(datadescription *types.DataDescription) *PutJob { +func (r *PutJob) DataDescription(datadescription types.DataDescriptionVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DataDescription = *datadescription + r.req.DataDescription = *datadescription.DataDescriptionCaster() return r } -// DatafeedConfig Defines a datafeed for the anomaly detection job. If Elasticsearch security +// Defines a datafeed for the anomaly detection job. If Elasticsearch security // features are enabled, your datafeed remembers which roles the user who // created it had at the time of creation and runs the query using those same // roles. If you provide secondary authorization headers, those credentials are // used instead. // API name: datafeed_config -func (r *PutJob) DatafeedConfig(datafeedconfig *types.DatafeedConfig) *PutJob { +func (r *PutJob) DatafeedConfig(datafeedconfig types.DatafeedConfigVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DatafeedConfig = datafeedconfig + r.req.DatafeedConfig = datafeedconfig.DatafeedConfigCaster() return r } -// Description A description of the job. +// A description of the job. // API name: description func (r *PutJob) Description(description string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Groups A list of job groups. A job can belong to no groups or many. +// A list of job groups. A job can belong to no groups or many. 
// API name: groups func (r *PutJob) Groups(groups ...string) *PutJob { - r.req.Groups = groups + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range groups { + + r.req.Groups = append(r.req.Groups, v) + } return r } -// ModelPlotConfig This advanced configuration option stores model information along with the +// This advanced configuration option stores model information along with the // results. It provides a more detailed view into anomaly detection. If you // enable model plot it can add considerable overhead to the performance of the // system; it is not feasible for jobs with many entities. Model plot provides a @@ -488,47 +589,64 @@ func (r *PutJob) Groups(groups ...string) *PutJob { // the model plot. Model plot config can be configured when the job is created // or updated later. It must be disabled if performance issues are experienced. // API name: model_plot_config -func (r *PutJob) ModelPlotConfig(modelplotconfig *types.ModelPlotConfig) *PutJob { +func (r *PutJob) ModelPlotConfig(modelplotconfig types.ModelPlotConfigVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ModelPlotConfig = modelplotconfig + r.req.ModelPlotConfig = modelplotconfig.ModelPlotConfigCaster() return r } -// ModelSnapshotRetentionDays Advanced configuration option, which affects the automatic removal of old +// Advanced configuration option, which affects the automatic removal of old // model snapshots for this job. It specifies the maximum period of time (in // days) that snapshots are retained. This period is relative to the timestamp // of the most recent snapshot for this job. By default, snapshots ten days // older than the newest snapshot are deleted. 
// API name: model_snapshot_retention_days func (r *PutJob) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelSnapshotRetentionDays = &modelsnapshotretentiondays return r } -// RenormalizationWindowDays Advanced configuration option. The period over which adjustments to the score +// Advanced configuration option. The period over which adjustments to the score // are applied, as new data is seen. The default value is the longer of 30 days // or 100 bucket spans. // API name: renormalization_window_days func (r *PutJob) RenormalizationWindowDays(renormalizationwindowdays int64) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RenormalizationWindowDays = &renormalizationwindowdays return r } -// ResultsIndexName A text string that affects the name of the machine learning results index. By +// A text string that affects the name of the machine learning results index. By // default, the job generates an index named `.ml-anomalies-shared`. // API name: results_index_name func (r *PutJob) ResultsIndexName(indexname string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ResultsIndexName = &indexname return r } -// ResultsRetentionDays Advanced configuration option. The period of time (in days) that results are +// Advanced configuration option. The period of time (in days) that results are // retained. Age is calculated relative to the timestamp of the latest bucket // result. If this property has a non-null value, once per day at 00:30 (server // time), results that are the specified number of days older than the latest @@ -538,6 +656,10 @@ func (r *PutJob) ResultsIndexName(indexname string) *PutJob { // number of days as results. Annotations added by users are retained forever. 
// API name: results_retention_days func (r *PutJob) ResultsRetentionDays(resultsretentiondays int64) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ResultsRetentionDays = &resultsretentiondays diff --git a/typedapi/ml/putjob/request.go b/typedapi/ml/putjob/request.go index db559e9ccb..34253a48dd 100644 --- a/typedapi/ml/putjob/request.go +++ b/typedapi/ml/putjob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putjob @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putjob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_job/MlPutJobRequest.ts#L30-L112 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_job/MlPutJobRequest.ts#L30-L157 type Request struct { // AllowLazyOpen Advanced configuration option. Specifies whether this job can open when there @@ -83,6 +83,10 @@ type Request struct { Description *string `json:"description,omitempty"` // Groups A list of job groups. A job can belong to no groups or many. Groups []string `json:"groups,omitempty"` + // JobId The identifier for the anomaly detection job. This identifier can contain + // lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It + // must start and end with alphanumeric characters. + JobId *string `json:"job_id,omitempty"` // ModelPlotConfig This advanced configuration option stores model information along with the // results. It provides a more detailed view into anomaly detection. 
If you // enable model plot it can add considerable overhead to the performance of the @@ -226,6 +230,11 @@ func (s *Request) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Groups", err) } + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + case "model_plot_config": if err := dec.Decode(&s.ModelPlotConfig); err != nil { return fmt.Errorf("%s | %w", "ModelPlotConfig", err) diff --git a/typedapi/ml/putjob/response.go b/typedapi/ml/putjob/response.go index 38bdfc4ecf..12ba31469b 100644 --- a/typedapi/ml/putjob/response.go +++ b/typedapi/ml/putjob/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putjob @@ -33,7 +33,7 @@ import ( // Response holds the response body struct for the package putjob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_job/MlPutJobResponse.ts#L29-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_job/MlPutJobResponse.ts#L29-L52 type Response struct { AllowLazyOpen bool `json:"allow_lazy_open"` AnalysisConfig types.AnalysisConfigRead `json:"analysis_config"` diff --git a/typedapi/ml/puttrainedmodel/put_trained_model.go b/typedapi/ml/puttrainedmodel/put_trained_model.go index a25d6e6fa5..9b04b3f422 100644 --- a/typedapi/ml/puttrainedmodel/put_trained_model.go +++ b/typedapi/ml/puttrainedmodel/put_trained_model.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Enables you to supply a trained model that is not created by data frame +// Create a trained model. +// Enable you to supply a trained model that is not created by data frame // analytics. package puttrainedmodel @@ -83,7 +84,8 @@ func NewPutTrainedModelFunc(tp elastictransport.Interface) NewPutTrainedModel { } } -// Enables you to supply a trained model that is not created by data frame +// Create a trained model. +// Enable you to supply a trained model that is not created by data frame // analytics. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models.html @@ -94,8 +96,6 @@ func New(tp elastictransport.Interface) *PutTrainedModel { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -381,64 +381,84 @@ func (r *PutTrainedModel) Pretty(pretty bool) *PutTrainedModel { return r } -// CompressedDefinition The compressed (GZipped and Base64 encoded) inference definition of the +// The compressed (GZipped and Base64 encoded) inference definition of the // model. If compressed_definition is specified, then definition cannot be // specified. // API name: compressed_definition func (r *PutTrainedModel) CompressedDefinition(compresseddefinition string) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.CompressedDefinition = &compresseddefinition return r } -// Definition The inference definition for the model. If definition is specified, then +// The inference definition for the model. If definition is specified, then // compressed_definition cannot be specified. 
// API name: definition -func (r *PutTrainedModel) Definition(definition *types.Definition) *PutTrainedModel { +func (r *PutTrainedModel) Definition(definition types.DefinitionVariant) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Definition = definition + r.req.Definition = definition.DefinitionCaster() return r } -// Description A human-readable description of the inference trained model. +// A human-readable description of the inference trained model. // API name: description func (r *PutTrainedModel) Description(description string) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// InferenceConfig The default configuration for inference. This can be either a regression +// The default configuration for inference. This can be either a regression // or classification configuration. It must match the underlying // definition.trained_model's target_type. For pre-packaged models such as // ELSER the config is not required. // API name: inference_config -func (r *PutTrainedModel) InferenceConfig(inferenceconfig *types.InferenceConfigCreateContainer) *PutTrainedModel { +func (r *PutTrainedModel) InferenceConfig(inferenceconfig types.InferenceConfigCreateContainerVariant) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.InferenceConfig = inferenceconfig + r.req.InferenceConfig = inferenceconfig.InferenceConfigCreateContainerCaster() return r } -// Input The input field names for the model definition. +// The input field names for the model definition. 
// API name: input -func (r *PutTrainedModel) Input(input *types.Input) *PutTrainedModel { +func (r *PutTrainedModel) Input(input types.InputVariant) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Input = input + r.req.Input = input.InputCaster() return r } -// Metadata An object map that contains metadata about the model. +// An object map that contains metadata about the model. // API name: metadata -// -// metadata should be a json.RawMessage or a structure -// if a structure is provided, the client will defer a json serialization -// prior to sending the payload to Elasticsearch. func (r *PutTrainedModel) Metadata(metadata any) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } switch casted := metadata.(type) { case json.RawMessage: r.req.Metadata = casted @@ -452,30 +472,36 @@ func (r *PutTrainedModel) Metadata(metadata any) *PutTrainedModel { return nil }) } - return r } -// ModelSizeBytes The estimated memory usage in bytes to keep the trained model in memory. +// The estimated memory usage in bytes to keep the trained model in memory. // This property is supported only if defer_definition_decompression is true // or the model definition is not supplied. // API name: model_size_bytes func (r *PutTrainedModel) ModelSizeBytes(modelsizebytes int64) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelSizeBytes = &modelsizebytes return r } -// ModelType The model type. +// The model type. 
// API name: model_type func (r *PutTrainedModel) ModelType(modeltype trainedmodeltype.TrainedModelType) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelType = &modeltype - return r } -// PlatformArchitecture The platform architecture (if applicable) of the trained mode. If the model +// The platform architecture (if applicable) of the trained mode. If the model // only works on one platform, because it is heavily optimized for a particular // processor architecture and OS combination, then this field specifies which. // The format of the string must match the platform identifiers used by @@ -487,25 +513,40 @@ func (r *PutTrainedModel) ModelType(modeltype trainedmodeltype.TrainedModelType) // architecture or OS features), leave this field unset. // API name: platform_architecture func (r *PutTrainedModel) PlatformArchitecture(platformarchitecture string) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.PlatformArchitecture = &platformarchitecture return r } -// PrefixStrings Optional prefix strings applied at inference +// Optional prefix strings applied at inference // API name: prefix_strings -func (r *PutTrainedModel) PrefixStrings(prefixstrings *types.TrainedModelPrefixStrings) *PutTrainedModel { +func (r *PutTrainedModel) PrefixStrings(prefixstrings types.TrainedModelPrefixStringsVariant) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PrefixStrings = prefixstrings + r.req.PrefixStrings = prefixstrings.TrainedModelPrefixStringsCaster() return r } -// Tags An array of tags to organize the model. +// An array of tags to organize the model. 
// API name: tags func (r *PutTrainedModel) Tags(tags ...string) *PutTrainedModel { - r.req.Tags = tags + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range tags { + r.req.Tags = append(r.req.Tags, v) + + } return r } diff --git a/typedapi/ml/puttrainedmodel/request.go b/typedapi/ml/puttrainedmodel/request.go index debfc7abc5..0096882202 100644 --- a/typedapi/ml/puttrainedmodel/request.go +++ b/typedapi/ml/puttrainedmodel/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package puttrainedmodel @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package puttrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/MlPutTrainedModelRequest.ts#L29-L124 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/MlPutTrainedModelRequest.ts#L31-L135 type Request struct { // CompressedDefinition The compressed (GZipped and Base64 encoded) inference definition of the diff --git a/typedapi/ml/puttrainedmodel/response.go b/typedapi/ml/puttrainedmodel/response.go index de35f914b0..63d8f4fc43 100644 --- a/typedapi/ml/puttrainedmodel/response.go +++ b/typedapi/ml/puttrainedmodel/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package puttrainedmodel @@ -34,7 +34,7 @@ import ( // Response holds the response body struct for the package puttrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/MlPutTrainedModelResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/MlPutTrainedModelResponse.ts#L22-L24 type Response struct { CompressedDefinition *string `json:"compressed_definition,omitempty"` // CreateTime The time when the trained model was created. @@ -65,8 +65,9 @@ type Response struct { // created by data frame analytics contain analysis_config and input objects. Metadata *types.TrainedModelConfigMetadata `json:"metadata,omitempty"` // ModelId Identifier for the trained model. 
- ModelId string `json:"model_id"` - ModelSizeBytes types.ByteSize `json:"model_size_bytes,omitempty"` + ModelId string `json:"model_id"` + ModelPackage *types.ModelPackageConfig `json:"model_package,omitempty"` + ModelSizeBytes types.ByteSize `json:"model_size_bytes,omitempty"` // ModelType The model type ModelType *trainedmodeltype.TrainedModelType `json:"model_type,omitempty"` PrefixStrings *types.TrainedModelPrefixStrings `json:"prefix_strings,omitempty"` @@ -231,6 +232,11 @@ func (s *Response) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "ModelId", err) } + case "model_package": + if err := dec.Decode(&s.ModelPackage); err != nil { + return fmt.Errorf("%s | %w", "ModelPackage", err) + } + case "model_size_bytes": if err := dec.Decode(&s.ModelSizeBytes); err != nil { return fmt.Errorf("%s | %w", "ModelSizeBytes", err) diff --git a/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go b/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go index d80bf069b9..9c0d4012c0 100644 --- a/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go +++ b/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates or updates a trained model alias. A trained model alias is a logical -// name used to reference a single trained model. +// Create or update a trained model alias. +// A trained model alias is a logical name used to reference a single trained +// model. // You can use aliases instead of trained model identifiers to make it easier to // reference your models. For example, you can use aliases in inference // aggregations and processors. 
@@ -96,8 +97,9 @@ func NewPutTrainedModelAliasFunc(tp elastictransport.Interface) NewPutTrainedMod } } -// Creates or updates a trained model alias. A trained model alias is a logical -// name used to reference a single trained model. +// Create or update a trained model alias. +// A trained model alias is a logical name used to reference a single trained +// model. // You can use aliases instead of trained model identifiers to make it easier to // reference your models. For example, you can use aliases in inference // aggregations and processors. diff --git a/typedapi/ml/puttrainedmodelalias/response.go b/typedapi/ml/puttrainedmodelalias/response.go index c3abf16e47..1c2d9a4a0f 100644 --- a/typedapi/ml/puttrainedmodelalias/response.go +++ b/typedapi/ml/puttrainedmodelalias/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package puttrainedmodelalias // Response holds the response body struct for the package puttrainedmodelalias // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model_alias/MlPutTrainedModelAliasResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model_alias/MlPutTrainedModelAliasResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go b/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go index 282e800e9c..3c62c4ce61 100644 --- a/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go +++ b/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates part of a trained model definition. +// Create part of a trained model definition. package puttrainedmodeldefinitionpart import ( @@ -86,7 +86,7 @@ func NewPutTrainedModelDefinitionPartFunc(tp elastictransport.Interface) NewPutT } } -// Creates part of a trained model definition. +// Create part of a trained model definition. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-definition-part.html func New(tp elastictransport.Interface) *PutTrainedModelDefinitionPart { @@ -96,8 +96,6 @@ func New(tp elastictransport.Interface) *PutTrainedModelDefinitionPart { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -384,27 +382,40 @@ func (r *PutTrainedModelDefinitionPart) Pretty(pretty bool) *PutTrainedModelDefi return r } -// Definition The definition part for the model. Must be a base64 encoded string. +// The definition part for the model. Must be a base64 encoded string. 
// API name: definition func (r *PutTrainedModelDefinitionPart) Definition(definition string) *PutTrainedModelDefinitionPart { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Definition = definition return r } -// TotalDefinitionLength The total uncompressed definition length in bytes. Not base64 encoded. +// The total uncompressed definition length in bytes. Not base64 encoded. // API name: total_definition_length func (r *PutTrainedModelDefinitionPart) TotalDefinitionLength(totaldefinitionlength int64) *PutTrainedModelDefinitionPart { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TotalDefinitionLength = totaldefinitionlength return r } -// TotalParts The total number of parts that will be uploaded. Must be greater than 0. +// The total number of parts that will be uploaded. Must be greater than 0. // API name: total_parts func (r *PutTrainedModelDefinitionPart) TotalParts(totalparts int) *PutTrainedModelDefinitionPart { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TotalParts = totalparts return r diff --git a/typedapi/ml/puttrainedmodeldefinitionpart/request.go b/typedapi/ml/puttrainedmodeldefinitionpart/request.go index 46db6b8bb1..2073eb8889 100644 --- a/typedapi/ml/puttrainedmodeldefinitionpart/request.go +++ b/typedapi/ml/puttrainedmodeldefinitionpart/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package puttrainedmodeldefinitionpart @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package puttrainedmodeldefinitionpart // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartRequest.ts#L24-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartRequest.ts#L24-L65 type Request struct { // Definition The definition part for the model. Must be a base64 encoded string. diff --git a/typedapi/ml/puttrainedmodeldefinitionpart/response.go b/typedapi/ml/puttrainedmodeldefinitionpart/response.go index 861551bd16..935588ae43 100644 --- a/typedapi/ml/puttrainedmodeldefinitionpart/response.go +++ b/typedapi/ml/puttrainedmodeldefinitionpart/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package puttrainedmodeldefinitionpart // Response holds the response body struct for the package puttrainedmodeldefinitionpart // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go b/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go index a43b619d0b..e210cf2c63 100644 --- a/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go +++ b/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a trained model vocabulary. +// Create a trained model vocabulary. // This API is supported only for natural language processing (NLP) models. // The vocabulary is stored in the index as described in // `inference_config.*.vocabulary` of the trained model definition. @@ -84,7 +84,7 @@ func NewPutTrainedModelVocabularyFunc(tp elastictransport.Interface) NewPutTrain } } -// Creates a trained model vocabulary. +// Create a trained model vocabulary. 
// This API is supported only for natural language processing (NLP) models. // The vocabulary is stored in the index as described in // `inference_config.*.vocabulary` of the trained model definition. @@ -97,8 +97,6 @@ func New(tp elastictransport.Interface) *PutTrainedModelVocabulary { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -367,26 +365,47 @@ func (r *PutTrainedModelVocabulary) Pretty(pretty bool) *PutTrainedModelVocabula return r } -// Merges The optional model merges if required by the tokenizer. +// The optional model merges if required by the tokenizer. // API name: merges func (r *PutTrainedModelVocabulary) Merges(merges ...string) *PutTrainedModelVocabulary { - r.req.Merges = merges + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range merges { + + r.req.Merges = append(r.req.Merges, v) + } return r } -// Scores The optional vocabulary value scores if required by the tokenizer. +// The optional vocabulary value scores if required by the tokenizer. // API name: scores func (r *PutTrainedModelVocabulary) Scores(scores ...types.Float64) *PutTrainedModelVocabulary { - r.req.Scores = scores + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range scores { + r.req.Scores = append(r.req.Scores, v) + + } return r } -// Vocabulary The model vocabulary, which must not be empty. +// The model vocabulary, which must not be empty. 
// API name: vocabulary func (r *PutTrainedModelVocabulary) Vocabulary(vocabularies ...string) *PutTrainedModelVocabulary { - r.req.Vocabulary = vocabularies + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range vocabularies { + r.req.Vocabulary = append(r.req.Vocabulary, v) + + } return r } diff --git a/typedapi/ml/puttrainedmodelvocabulary/request.go b/typedapi/ml/puttrainedmodelvocabulary/request.go index deda1de586..ed93d8cda7 100644 --- a/typedapi/ml/puttrainedmodelvocabulary/request.go +++ b/typedapi/ml/puttrainedmodelvocabulary/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package puttrainedmodelvocabulary @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package puttrainedmodelvocabulary // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyRequest.ts#L24-L60 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyRequest.ts#L24-L68 type Request struct { // Merges The optional model merges if required by the tokenizer. diff --git a/typedapi/ml/puttrainedmodelvocabulary/response.go b/typedapi/ml/puttrainedmodelvocabulary/response.go index a1929e16e6..b7ce331c1a 100644 --- a/typedapi/ml/puttrainedmodelvocabulary/response.go +++ b/typedapi/ml/puttrainedmodelvocabulary/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package puttrainedmodelvocabulary // Response holds the response body struct for the package puttrainedmodelvocabulary // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/resetjob/reset_job.go b/typedapi/ml/resetjob/reset_job.go index 84ed992ff8..c1ea577c26 100644 --- a/typedapi/ml/resetjob/reset_job.go +++ b/typedapi/ml/resetjob/reset_job.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Resets an anomaly detection job. +// Reset an anomaly detection job. // All model state and results are deleted. The job is ready to start over as if // it had just been created. // It is not currently possible to reset multiple jobs using wildcards or a @@ -80,7 +80,7 @@ func NewResetJobFunc(tp elastictransport.Interface) NewResetJob { } } -// Resets an anomaly detection job. +// Reset an anomaly detection job. // All model state and results are deleted. The job is ready to start over as if // it had just been created. 
// It is not currently possible to reset multiple jobs using wildcards or a diff --git a/typedapi/ml/resetjob/response.go b/typedapi/ml/resetjob/response.go index 4b4d4135a4..dc0b821fd0 100644 --- a/typedapi/ml/resetjob/response.go +++ b/typedapi/ml/resetjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package resetjob // Response holds the response body struct for the package resetjob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/reset_job/MlResetJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/reset_job/MlResetJobResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/revertmodelsnapshot/request.go b/typedapi/ml/revertmodelsnapshot/request.go index a44ca51351..64cc682f58 100644 --- a/typedapi/ml/revertmodelsnapshot/request.go +++ b/typedapi/ml/revertmodelsnapshot/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package revertmodelsnapshot @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package revertmodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/revert_model_snapshot/MlRevertModelSnapshotRequest.ts#L23-L69 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/revert_model_snapshot/MlRevertModelSnapshotRequest.ts#L23-L77 type Request struct { // DeleteInterveningResults Refer to the description for the `delete_intervening_results` query diff --git a/typedapi/ml/revertmodelsnapshot/response.go b/typedapi/ml/revertmodelsnapshot/response.go index 8faa3333aa..c54b47e65e 100644 --- a/typedapi/ml/revertmodelsnapshot/response.go +++ b/typedapi/ml/revertmodelsnapshot/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package revertmodelsnapshot @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package revertmodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/revert_model_snapshot/MlRevertModelSnapshotResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/revert_model_snapshot/MlRevertModelSnapshotResponse.ts#L22-L24 type Response struct { Model types.ModelSnapshot `json:"model"` } diff --git a/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go b/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go index aca07de51d..2e32332c49 100644 --- a/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go +++ b/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Reverts to a specific snapshot. +// Revert to a snapshot. // The machine learning features react quickly to anomalous input, learning new // behaviors in data. Highly anomalous input increases the variance in the // models whilst the system learns whether this is a new step-change in behavior @@ -93,7 +93,7 @@ func NewRevertModelSnapshotFunc(tp elastictransport.Interface) NewRevertModelSna } } -// Reverts to a specific snapshot. +// Revert to a snapshot. // The machine learning features react quickly to anomalous input, learning new // behaviors in data. 
Highly anomalous input increases the variance in the // models whilst the system learns whether this is a new step-change in behavior @@ -110,8 +110,6 @@ func New(tp elastictransport.Interface) *RevertModelSnapshot { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -399,10 +397,15 @@ func (r *RevertModelSnapshot) Pretty(pretty bool) *RevertModelSnapshot { return r } -// DeleteInterveningResults Refer to the description for the `delete_intervening_results` query +// Refer to the description for the `delete_intervening_results` query // parameter. // API name: delete_intervening_results func (r *RevertModelSnapshot) DeleteInterveningResults(deleteinterveningresults bool) *RevertModelSnapshot { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DeleteInterveningResults = &deleteinterveningresults return r diff --git a/typedapi/ml/setupgrademode/response.go b/typedapi/ml/setupgrademode/response.go index a69e784a07..27fb39f105 100644 --- a/typedapi/ml/setupgrademode/response.go +++ b/typedapi/ml/setupgrademode/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package setupgrademode // Response holds the response body struct for the package setupgrademode // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/set_upgrade_mode/MlSetUpgradeModeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/set_upgrade_mode/MlSetUpgradeModeResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/setupgrademode/set_upgrade_mode.go b/typedapi/ml/setupgrademode/set_upgrade_mode.go index a6addd3587..c79b416778 100644 --- a/typedapi/ml/setupgrademode/set_upgrade_mode.go +++ b/typedapi/ml/setupgrademode/set_upgrade_mode.go @@ -16,8 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Set upgrade_mode for ML indices. // Sets a cluster wide upgrade_mode setting that prepares machine learning // indices for an upgrade. // When upgrading your cluster, in some circumstances you must restart your @@ -79,6 +80,7 @@ func NewSetUpgradeModeFunc(tp elastictransport.Interface) NewSetUpgradeMode { } } +// Set upgrade_mode for ML indices. // Sets a cluster wide upgrade_mode setting that prepares machine learning // indices for an upgrade. 
// When upgrading your cluster, in some circumstances you must restart your diff --git a/typedapi/ml/startdatafeed/request.go b/typedapi/ml/startdatafeed/request.go index 00e3eb22c1..8dfbfdd721 100644 --- a/typedapi/ml/startdatafeed/request.go +++ b/typedapi/ml/startdatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package startdatafeed @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package startdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/start_datafeed/MlStartDatafeedRequest.ts#L24-L91 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/start_datafeed/MlStartDatafeedRequest.ts#L24-L99 type Request struct { // End Refer to the description for the `end` query parameter. diff --git a/typedapi/ml/startdatafeed/response.go b/typedapi/ml/startdatafeed/response.go index 7998b007c5..7111e812b0 100644 --- a/typedapi/ml/startdatafeed/response.go +++ b/typedapi/ml/startdatafeed/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package startdatafeed @@ -31,7 +31,7 @@ import ( // Response holds the response body struct for the package startdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/start_datafeed/MlStartDatafeedResponse.ts#L22-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/start_datafeed/MlStartDatafeedResponse.ts#L22-L34 type Response struct { // Node The ID of the node that the job was started on. In serverless this will be diff --git a/typedapi/ml/startdatafeed/start_datafeed.go b/typedapi/ml/startdatafeed/start_datafeed.go index f2c8ff2076..78c0b00247 100644 --- a/typedapi/ml/startdatafeed/start_datafeed.go +++ b/typedapi/ml/startdatafeed/start_datafeed.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Starts one or more datafeeds. +// Start datafeeds. // // A datafeed must be started in order to retrieve data from Elasticsearch. A // datafeed can be started and stopped @@ -100,7 +100,7 @@ func NewStartDatafeedFunc(tp elastictransport.Interface) NewStartDatafeed { } } -// Starts one or more datafeeds. +// Start datafeeds. // // A datafeed must be started in order to retrieve data from Elasticsearch. 
A // datafeed can be started and stopped @@ -129,8 +129,6 @@ func New(tp elastictransport.Interface) *StartDatafeed { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -403,26 +401,41 @@ func (r *StartDatafeed) Pretty(pretty bool) *StartDatafeed { return r } -// End Refer to the description for the `end` query parameter. +// Refer to the description for the `end` query parameter. // API name: end -func (r *StartDatafeed) End(datetime types.DateTime) *StartDatafeed { - r.req.End = datetime +func (r *StartDatafeed) End(datetime types.DateTimeVariant) *StartDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() return r } -// Start Refer to the description for the `start` query parameter. +// Refer to the description for the `start` query parameter. // API name: start -func (r *StartDatafeed) Start(datetime types.DateTime) *StartDatafeed { - r.req.Start = datetime +func (r *StartDatafeed) Start(datetime types.DateTimeVariant) *StartDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() return r } -// Timeout Refer to the description for the `timeout` query parameter. +// Refer to the description for the `timeout` query parameter. 
// API name: timeout -func (r *StartDatafeed) Timeout(duration types.Duration) *StartDatafeed { - r.req.Timeout = duration +func (r *StartDatafeed) Timeout(duration types.DurationVariant) *StartDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/ml/startdataframeanalytics/response.go b/typedapi/ml/startdataframeanalytics/response.go index d1cf01947c..3f75faa7d1 100644 --- a/typedapi/ml/startdataframeanalytics/response.go +++ b/typedapi/ml/startdataframeanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package startdataframeanalytics // Response holds the response body struct for the package startdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/start_data_frame_analytics/MlStartDataFrameAnalyticsResponse.ts#L22-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/start_data_frame_analytics/MlStartDataFrameAnalyticsResponse.ts#L22-L34 type Response struct { Acknowledged bool `json:"acknowledged"` // Node The ID of the node that the job was started on. If the job is allowed to open diff --git a/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go b/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go index abf95f57a9..0e4f846ab9 100644 --- a/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go +++ b/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go @@ -16,9 +16,9 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Starts a data frame analytics job. +// Start a data frame analytics job. // A data frame analytics job can be started and stopped multiple times // throughout its lifecycle. // If the destination index does not exist, it is created automatically the @@ -87,7 +87,7 @@ func NewStartDataFrameAnalyticsFunc(tp elastictransport.Interface) NewStartDataF } } -// Starts a data frame analytics job. +// Start a data frame analytics job. // A data frame analytics job can be started and stopped multiple times // throughout its lifecycle. // If the destination index does not exist, it is created automatically the diff --git a/typedapi/ml/starttrainedmodeldeployment/request.go b/typedapi/ml/starttrainedmodeldeployment/request.go new file mode 100644 index 0000000000..0cfaf53ed8 --- /dev/null +++ b/typedapi/ml/starttrainedmodeldeployment/request.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package starttrainedmodeldeployment + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package starttrainedmodeldeployment +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/start_trained_model_deployment/MlStartTrainedModelDeploymentRequest.ts#L30-L111 +type Request struct { + + // AdaptiveAllocations Adaptive allocations configuration. When enabled, the number of allocations + // is set based on the current load. + // If adaptive_allocations is enabled, do not set the number of allocations + // manually. + AdaptiveAllocations *types.AdaptiveAllocationsSettings `json:"adaptive_allocations,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Starttrainedmodeldeployment request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/ml/starttrainedmodeldeployment/response.go b/typedapi/ml/starttrainedmodeldeployment/response.go index baac61364f..2c0f3c4cdf 100644 --- a/typedapi/ml/starttrainedmodeldeployment/response.go +++ b/typedapi/ml/starttrainedmodeldeployment/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package starttrainedmodeldeployment @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package starttrainedmodeldeployment // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/start_trained_model_deployment/MlStartTrainedModelDeploymentResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/start_trained_model_deployment/MlStartTrainedModelDeploymentResponse.ts#L22-L26 type Response struct { Assignment types.TrainedModelAssignment `json:"assignment"` } diff --git a/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go b/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go index 5538b7d31c..b3cad111fc 100644 --- a/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go +++ b/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go @@ -16,13 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Starts a trained model deployment, which allocates the model to every machine -// learning node. +// Start a trained model deployment. +// It allocates the model to every machine learning node. 
package starttrainedmodeldeployment import ( + gobytes "bytes" "context" "encoding/json" "errors" @@ -55,6 +56,10 @@ type StartTrainedModelDeployment struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int modelid string @@ -79,8 +84,8 @@ func NewStartTrainedModelDeploymentFunc(tp elastictransport.Interface) NewStartT } } -// Starts a trained model deployment, which allocates the model to every machine -// learning node. +// Start a trained model deployment. +// It allocates the model to every machine learning node. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trained-model-deployment.html func New(tp elastictransport.Interface) *StartTrainedModelDeployment { @@ -88,6 +93,8 @@ func New(tp elastictransport.Interface) *StartTrainedModelDeployment { transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -99,6 +106,21 @@ func New(tp elastictransport.Interface) *StartTrainedModelDeployment { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *StartTrainedModelDeployment) Raw(raw io.Reader) *StartTrainedModelDeployment { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *StartTrainedModelDeployment) Request(req *Request) *StartTrainedModelDeployment { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. 
func (r *StartTrainedModelDeployment) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -108,6 +130,31 @@ func (r *StartTrainedModelDeployment) HttpRequest(ctx context.Context) (*http.Re var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for StartTrainedModelDeployment: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -258,45 +305,6 @@ func (r StartTrainedModelDeployment) Do(providedCtx context.Context) (*Response, return nil, errorResponse } -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. -func (r StartTrainedModelDeployment) IsSuccess(providedCtx context.Context) (bool, error) { - var ctx context.Context - r.spanStarted = true - if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - ctx = instrument.Start(providedCtx, "ml.start_trained_model_deployment") - defer instrument.Close(ctx) - } - if ctx == nil { - ctx = providedCtx - } - - res, err := r.Perform(ctx) - - if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err - } - - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil - } - - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the StartTrainedModelDeployment query execution, status code: %d", res.StatusCode) - if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { - instrument.RecordError(ctx, err) - } - return false, err - } - - return false, nil -} - // Header set a key, value pair in the StartTrainedModelDeployment 
headers map. func (r *StartTrainedModelDeployment) Header(key, value string) *StartTrainedModelDeployment { r.headers.Set(key, value) @@ -341,6 +349,8 @@ func (r *StartTrainedModelDeployment) DeploymentId(deploymentid string) *StartTr // If this setting is greater than the number of hardware threads // it will automatically be changed to a value less than the number of hardware // threads. +// If adaptive_allocations is enabled, do not set this value, because it’s +// automatically set. // API name: number_of_allocations func (r *StartTrainedModelDeployment) NumberOfAllocations(numberofallocations int) *StartTrainedModelDeployment { r.values.Set("number_of_allocations", strconv.Itoa(numberofallocations)) @@ -442,3 +452,19 @@ func (r *StartTrainedModelDeployment) Pretty(pretty bool) *StartTrainedModelDepl return r } + +// Adaptive allocations configuration. When enabled, the number of allocations +// is set based on the current load. +// If adaptive_allocations is enabled, do not set the number of allocations +// manually. +// API name: adaptive_allocations +func (r *StartTrainedModelDeployment) AdaptiveAllocations(adaptiveallocations types.AdaptiveAllocationsSettingsVariant) *StartTrainedModelDeployment { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AdaptiveAllocations = adaptiveallocations.AdaptiveAllocationsSettingsCaster() + + return r +} diff --git a/typedapi/ml/stopdatafeed/request.go b/typedapi/ml/stopdatafeed/request.go index f2b1e542da..1e30609d91 100644 --- a/typedapi/ml/stopdatafeed/request.go +++ b/typedapi/ml/stopdatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stopdatafeed @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package stopdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/stop_datafeed/MlStopDatafeedRequest.ts#L24-L78 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/stop_datafeed/MlStopDatafeedRequest.ts#L24-L86 type Request struct { // AllowNoMatch Refer to the description for the `allow_no_match` query parameter. diff --git a/typedapi/ml/stopdatafeed/response.go b/typedapi/ml/stopdatafeed/response.go index 7c32713040..2f602adde4 100644 --- a/typedapi/ml/stopdatafeed/response.go +++ b/typedapi/ml/stopdatafeed/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stopdatafeed // Response holds the response body struct for the package stopdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/stop_datafeed/MlStopDatafeedResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/stop_datafeed/MlStopDatafeedResponse.ts#L20-L22 type Response struct { Stopped bool `json:"stopped"` } diff --git a/typedapi/ml/stopdatafeed/stop_datafeed.go b/typedapi/ml/stopdatafeed/stop_datafeed.go index f69adfa07e..3b87a0fc6c 100644 --- a/typedapi/ml/stopdatafeed/stop_datafeed.go +++ b/typedapi/ml/stopdatafeed/stop_datafeed.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Stops one or more datafeeds. +// Stop datafeeds. // A datafeed that is stopped ceases to retrieve data from Elasticsearch. A // datafeed can be started and stopped // multiple times throughout its lifecycle. @@ -84,7 +84,7 @@ func NewStopDatafeedFunc(tp elastictransport.Interface) NewStopDatafeed { } } -// Stops one or more datafeeds. +// Stop datafeeds. // A datafeed that is stopped ceases to retrieve data from Elasticsearch. A // datafeed can be started and stopped // multiple times throughout its lifecycle. 
@@ -97,8 +97,6 @@ func New(tp elastictransport.Interface) *StopDatafeed { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -371,26 +369,41 @@ func (r *StopDatafeed) Pretty(pretty bool) *StopDatafeed { return r } -// AllowNoMatch Refer to the description for the `allow_no_match` query parameter. +// Refer to the description for the `allow_no_match` query parameter. // API name: allow_no_match func (r *StopDatafeed) AllowNoMatch(allownomatch bool) *StopDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowNoMatch = &allownomatch return r } -// Force Refer to the description for the `force` query parameter. +// Refer to the description for the `force` query parameter. // API name: force func (r *StopDatafeed) Force(force bool) *StopDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Force = &force return r } -// Timeout Refer to the description for the `timeout` query parameter. +// Refer to the description for the `timeout` query parameter. // API name: timeout -func (r *StopDatafeed) Timeout(duration types.Duration) *StopDatafeed { - r.req.Timeout = duration +func (r *StopDatafeed) Timeout(duration types.DurationVariant) *StopDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/ml/stopdataframeanalytics/response.go b/typedapi/ml/stopdataframeanalytics/response.go index 49989a4b07..a4d493a2d8 100644 --- a/typedapi/ml/stopdataframeanalytics/response.go +++ b/typedapi/ml/stopdataframeanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stopdataframeanalytics // Response holds the response body struct for the package stopdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/stop_data_frame_analytics/MlStopDataFrameAnalyticsResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/stop_data_frame_analytics/MlStopDataFrameAnalyticsResponse.ts#L20-L22 type Response struct { Stopped bool `json:"stopped"` } diff --git a/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go b/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go index e3c77040e4..2440d18c00 100644 --- a/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go +++ b/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Stops one or more data frame analytics jobs. +// Stop data frame analytics jobs. // A data frame analytics job can be started and stopped multiple times // throughout its lifecycle. package stopdataframeanalytics @@ -78,7 +78,7 @@ func NewStopDataFrameAnalyticsFunc(tp elastictransport.Interface) NewStopDataFra } } -// Stops one or more data frame analytics jobs. +// Stop data frame analytics jobs. // A data frame analytics job can be started and stopped multiple times // throughout its lifecycle. 
// diff --git a/typedapi/ml/stoptrainedmodeldeployment/response.go b/typedapi/ml/stoptrainedmodeldeployment/response.go index 95acd962a8..4d61a67a98 100644 --- a/typedapi/ml/stoptrainedmodeldeployment/response.go +++ b/typedapi/ml/stoptrainedmodeldeployment/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stoptrainedmodeldeployment // Response holds the response body struct for the package stoptrainedmodeldeployment // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/stop_trained_model_deployment/MlStopTrainedModelDeploymentResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/stop_trained_model_deployment/MlStopTrainedModelDeploymentResponse.ts#L20-L22 type Response struct { Stopped bool `json:"stopped"` } diff --git a/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go b/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go index f1ff1fa996..da8a6d1b05 100644 --- a/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go +++ b/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Stops a trained model deployment. +// Stop a trained model deployment. 
package stoptrainedmodeldeployment import ( @@ -76,7 +76,7 @@ func NewStopTrainedModelDeploymentFunc(tp elastictransport.Interface) NewStopTra } } -// Stops a trained model deployment. +// Stop a trained model deployment. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-trained-model-deployment.html func New(tp elastictransport.Interface) *StopTrainedModelDeployment { diff --git a/typedapi/ml/updatedatafeed/request.go b/typedapi/ml/updatedatafeed/request.go index 284904a3e7..384e6795dc 100644 --- a/typedapi/ml/updatedatafeed/request.go +++ b/typedapi/ml/updatedatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatedatafeed @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package updatedatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/update_datafeed/MlUpdateDatafeedRequest.ts#L31-L162 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/update_datafeed/MlUpdateDatafeedRequest.ts#L31-L170 type Request struct { // Aggregations If set, the datafeed performs aggregation searches. Support for aggregations diff --git a/typedapi/ml/updatedatafeed/response.go b/typedapi/ml/updatedatafeed/response.go index 16e7dec02b..48c1591a92 100644 --- a/typedapi/ml/updatedatafeed/response.go +++ b/typedapi/ml/updatedatafeed/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatedatafeed @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatedatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/update_datafeed/MlUpdateDatafeedResponse.ts#L31-L49 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/update_datafeed/MlUpdateDatafeedResponse.ts#L31-L49 type Response struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` Authorization *types.DatafeedAuthorization `json:"authorization,omitempty"` diff --git a/typedapi/ml/updatedatafeed/update_datafeed.go b/typedapi/ml/updatedatafeed/update_datafeed.go index 8f42223fa0..de16fec79a 100644 --- a/typedapi/ml/updatedatafeed/update_datafeed.go +++ b/typedapi/ml/updatedatafeed/update_datafeed.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the properties of a datafeed. +// Update a datafeed. // You must stop and start the datafeed for the changes to be applied. // When Elasticsearch security features are enabled, your datafeed remembers // which roles the user who updated it had at @@ -88,7 +88,7 @@ func NewUpdateDatafeedFunc(tp elastictransport.Interface) NewUpdateDatafeed { } } -// Updates the properties of a datafeed. +// Update a datafeed. // You must stop and start the datafeed for the changes to be applied. 
// When Elasticsearch security features are enabled, your datafeed remembers // which roles the user who updated it had at @@ -104,8 +104,6 @@ func New(tp elastictransport.Interface) *UpdateDatafeed { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -427,31 +425,56 @@ func (r *UpdateDatafeed) Pretty(pretty bool) *UpdateDatafeed { return r } -// Aggregations If set, the datafeed performs aggregation searches. Support for aggregations +// If set, the datafeed performs aggregation searches. Support for aggregations // is limited and should be used only // with low cardinality data. // API name: aggregations func (r *UpdateDatafeed) Aggregations(aggregations map[string]types.Aggregations) *UpdateDatafeed { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} + +func (r *UpdateDatafeed) AddAggregation(key string, value types.AggregationsVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + tmp[key] = *value.AggregationsCaster() + + r.req.Aggregations = tmp return r } -// ChunkingConfig Datafeeds might search over long time periods, for several months or years. +// Datafeeds might search over long time periods, for several months or years. // This search is split into time // chunks in order to ensure the load on Elasticsearch is managed. Chunking // configuration controls how the size of // these time chunks are calculated; it is an advanced configuration option. 
// API name: chunking_config -func (r *UpdateDatafeed) ChunkingConfig(chunkingconfig *types.ChunkingConfig) *UpdateDatafeed { +func (r *UpdateDatafeed) ChunkingConfig(chunkingconfig types.ChunkingConfigVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ChunkingConfig = chunkingconfig + r.req.ChunkingConfig = chunkingconfig.ChunkingConfigCaster() return r } -// DelayedDataCheckConfig Specifies whether the datafeed checks for missing data and the size of the +// Specifies whether the datafeed checks for missing data and the size of the // window. The datafeed can optionally // search over indices that have already been read in an effort to determine // whether any data has subsequently been @@ -461,14 +484,18 @@ func (r *UpdateDatafeed) ChunkingConfig(chunkingconfig *types.ChunkingConfig) *U // This check runs only on real-time // datafeeds. // API name: delayed_data_check_config -func (r *UpdateDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig *types.DelayedDataCheckConfig) *UpdateDatafeed { +func (r *UpdateDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig types.DelayedDataCheckConfigVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DelayedDataCheckConfig = delayeddatacheckconfig + r.req.DelayedDataCheckConfig = delayeddatacheckconfig.DelayedDataCheckConfigCaster() return r } -// Frequency The interval at which scheduled queries are made while the datafeed runs in +// The interval at which scheduled queries are made while the datafeed runs in // real time. 
The default value is // either the bucket span for short bucket spans, or, for longer bucket spans, a // sensible fraction of the bucket @@ -478,39 +505,60 @@ func (r *UpdateDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig *types.De // datafeed uses aggregations, this value // must be divisible by the interval of the date histogram aggregation. // API name: frequency -func (r *UpdateDatafeed) Frequency(duration types.Duration) *UpdateDatafeed { - r.req.Frequency = duration +func (r *UpdateDatafeed) Frequency(duration types.DurationVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() return r } -// Indices An array of index names. Wildcards are supported. If any of the indices are +// An array of index names. Wildcards are supported. If any of the indices are // in remote clusters, the machine // learning nodes must have the `remote_cluster_client` role. // API name: indices func (r *UpdateDatafeed) Indices(indices ...string) *UpdateDatafeed { - r.req.Indices = indices + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range indices { + + r.req.Indices = append(r.req.Indices, v) + } return r } -// IndicesOptions Specifies index expansion options that are used during search. +// Specifies index expansion options that are used during search. 
// API name: indices_options -func (r *UpdateDatafeed) IndicesOptions(indicesoptions *types.IndicesOptions) *UpdateDatafeed { +func (r *UpdateDatafeed) IndicesOptions(indicesoptions types.IndicesOptionsVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndicesOptions = indicesoptions + r.req.IndicesOptions = indicesoptions.IndicesOptionsCaster() return r } // API name: job_id func (r *UpdateDatafeed) JobId(id string) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.JobId = &id return r } -// MaxEmptySearches If a real-time datafeed has never seen any data (including during any initial +// If a real-time datafeed has never seen any data (including during any initial // training period), it automatically // stops and closes the associated job after this many real-time searches return // no documents. In other words, @@ -520,12 +568,17 @@ func (r *UpdateDatafeed) JobId(id string) *UpdateDatafeed { // default, it is not set. // API name: max_empty_searches func (r *UpdateDatafeed) MaxEmptySearches(maxemptysearches int) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxEmptySearches = &maxemptysearches return r } -// Query The Elasticsearch query domain-specific language (DSL). This value +// The Elasticsearch query domain-specific language (DSL). This value // corresponds to the query object in an // Elasticsearch search POST body. All the options that are supported by // Elasticsearch can be used, as this @@ -539,14 +592,18 @@ func (r *UpdateDatafeed) MaxEmptySearches(maxemptysearches int) *UpdateDatafeed // in parallel and close one // when you are satisfied with the results of the job. 
// API name: query -func (r *UpdateDatafeed) Query(query *types.Query) *UpdateDatafeed { +func (r *UpdateDatafeed) Query(query types.QueryVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// QueryDelay The number of seconds behind real time that data is queried. For example, if +// The number of seconds behind real time that data is queried. For example, if // data from 10:04 a.m. might // not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 // seconds. The default @@ -554,37 +611,73 @@ func (r *UpdateDatafeed) Query(query *types.Query) *UpdateDatafeed { // the query performance // when there are multiple jobs running on the same node. // API name: query_delay -func (r *UpdateDatafeed) QueryDelay(duration types.Duration) *UpdateDatafeed { - r.req.QueryDelay = duration +func (r *UpdateDatafeed) QueryDelay(duration types.DurationVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.QueryDelay = *duration.DurationCaster() return r } -// RuntimeMappings Specifies runtime fields for the datafeed search. +// Specifies runtime fields for the datafeed search. // API name: runtime_mappings -func (r *UpdateDatafeed) RuntimeMappings(runtimefields types.RuntimeFields) *UpdateDatafeed { - r.req.RuntimeMappings = runtimefields +func (r *UpdateDatafeed) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// ScriptFields Specifies scripts that evaluate custom expressions and returns script fields +// Specifies scripts that evaluate custom expressions and returns script fields // to the datafeed. 
// The detector configuration objects in a job can contain functions that use // these script fields. // API name: script_fields func (r *UpdateDatafeed) ScriptFields(scriptfields map[string]types.ScriptField) *UpdateDatafeed { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ScriptFields = scriptfields + return r +} + +func (r *UpdateDatafeed) AddScriptField(key string, value types.ScriptFieldVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ScriptField + if r.req.ScriptFields == nil { + r.req.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = r.req.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + r.req.ScriptFields = tmp return r } -// ScrollSize The size parameter that is used in Elasticsearch searches when the datafeed +// The size parameter that is used in Elasticsearch searches when the datafeed // does not use aggregations. // The maximum value is the value of `index.max_result_window`. // API name: scroll_size func (r *UpdateDatafeed) ScrollSize(scrollsize int) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ScrollSize = &scrollsize return r diff --git a/typedapi/ml/updatedataframeanalytics/request.go b/typedapi/ml/updatedataframeanalytics/request.go index 8ff7d1548c..a1f2d152ee 100644 --- a/typedapi/ml/updatedataframeanalytics/request.go +++ b/typedapi/ml/updatedataframeanalytics/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatedataframeanalytics @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updatedataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsRequest.ts#L24-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsRequest.ts#L24-L80 type Request struct { // AllowLazyStart Specifies whether this job can start when there is insufficient machine diff --git a/typedapi/ml/updatedataframeanalytics/response.go b/typedapi/ml/updatedataframeanalytics/response.go index 6dc3669624..5eeff6255d 100644 --- a/typedapi/ml/updatedataframeanalytics/response.go +++ b/typedapi/ml/updatedataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatedataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatedataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsResponse.ts#L30-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsResponse.ts#L30-L45 type Response struct { AllowLazyStart bool `json:"allow_lazy_start"` Analysis types.DataframeAnalysisContainer `json:"analysis"` diff --git a/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go b/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go index f26b0e0cb8..4c69bc5f5c 100644 --- a/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go +++ b/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates an existing data frame analytics job. +// Update a data frame analytics job. package updatedataframeanalytics import ( @@ -81,7 +81,7 @@ func NewUpdateDataFrameAnalyticsFunc(tp elastictransport.Interface) NewUpdateDat } } -// Updates an existing data frame analytics job. +// Update a data frame analytics job. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-dfanalytics.html func New(tp elastictransport.Interface) *UpdateDataFrameAnalytics { @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *UpdateDataFrameAnalytics { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -365,42 +363,60 @@ func (r *UpdateDataFrameAnalytics) Pretty(pretty bool) *UpdateDataFrameAnalytics return r } -// AllowLazyStart Specifies whether this job can start when there is insufficient machine +// Specifies whether this job can start when there is insufficient machine // learning node capacity for it to be immediately assigned to a node. // API name: allow_lazy_start func (r *UpdateDataFrameAnalytics) AllowLazyStart(allowlazystart bool) *UpdateDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowLazyStart = &allowlazystart return r } -// Description A description of the job. +// A description of the job. // API name: description func (r *UpdateDataFrameAnalytics) Description(description string) *UpdateDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// MaxNumThreads The maximum number of threads to be used by the analysis. Using more +// The maximum number of threads to be used by the analysis. Using more // threads may decrease the time necessary to complete the analysis at the // cost of using more CPU. Note that the process may use additional threads // for operational functionality other than the analysis itself. 
// API name: max_num_threads func (r *UpdateDataFrameAnalytics) MaxNumThreads(maxnumthreads int) *UpdateDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxNumThreads = &maxnumthreads return r } -// ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for +// The approximate maximum amount of memory resources that are permitted for // analytical processing. If your `elasticsearch.yml` file contains an // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try // to create data frame analytics jobs that have `model_memory_limit` values // greater than that setting. // API name: model_memory_limit func (r *UpdateDataFrameAnalytics) ModelMemoryLimit(modelmemorylimit string) *UpdateDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelMemoryLimit = &modelmemorylimit diff --git a/typedapi/ml/updatefilter/request.go b/typedapi/ml/updatefilter/request.go index 8509f2058a..500167ef8a 100644 --- a/typedapi/ml/updatefilter/request.go +++ b/typedapi/ml/updatefilter/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatefilter @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updatefilter // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/update_filter/MlUpdateFilterRequest.ts#L23-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/update_filter/MlUpdateFilterRequest.ts#L23-L60 type Request struct { // AddItems The items to add to the filter. diff --git a/typedapi/ml/updatefilter/response.go b/typedapi/ml/updatefilter/response.go index 28a5bea956..d274ce326f 100644 --- a/typedapi/ml/updatefilter/response.go +++ b/typedapi/ml/updatefilter/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatefilter // Response holds the response body struct for the package updatefilter // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/update_filter/MlUpdateFilterResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/update_filter/MlUpdateFilterResponse.ts#L22-L28 type Response struct { Description string `json:"description"` FilterId string `json:"filter_id"` diff --git a/typedapi/ml/updatefilter/update_filter.go b/typedapi/ml/updatefilter/update_filter.go index e1226100e3..359c09ff76 100644 --- a/typedapi/ml/updatefilter/update_filter.go +++ b/typedapi/ml/updatefilter/update_filter.go @@ -16,8 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Update a filter. // Updates the description of a filter, adds items, or removes items from the // list. package updatefilter @@ -82,6 +83,7 @@ func NewUpdateFilterFunc(tp elastictransport.Interface) NewUpdateFilter { } } +// Update a filter. // Updates the description of a filter, adds items, or removes items from the // list. // @@ -93,8 +95,6 @@ func New(tp elastictransport.Interface) *UpdateFilter { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -363,27 +363,45 @@ func (r *UpdateFilter) Pretty(pretty bool) *UpdateFilter { return r } -// AddItems The items to add to the filter. 
+// The items to add to the filter. // API name: add_items func (r *UpdateFilter) AddItems(additems ...string) *UpdateFilter { - r.req.AddItems = additems + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range additems { + r.req.AddItems = append(r.req.AddItems, v) + + } return r } -// Description A description for the filter. +// A description for the filter. // API name: description func (r *UpdateFilter) Description(description string) *UpdateFilter { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// RemoveItems The items to remove from the filter. +// The items to remove from the filter. // API name: remove_items func (r *UpdateFilter) RemoveItems(removeitems ...string) *UpdateFilter { - r.req.RemoveItems = removeitems + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range removeitems { + r.req.RemoveItems = append(r.req.RemoveItems, v) + + } return r } diff --git a/typedapi/ml/updatejob/request.go b/typedapi/ml/updatejob/request.go index 72e3d246f3..4e40756c41 100644 --- a/typedapi/ml/updatejob/request.go +++ b/typedapi/ml/updatejob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatejob @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package updatejob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/update_job/MlUpdateJobRequest.ts#L33-L138 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/update_job/MlUpdateJobRequest.ts#L33-L147 type Request struct { // AllowLazyOpen Advanced configuration option. Specifies whether this job can open when @@ -74,7 +74,7 @@ type Request struct { // Description A description of the job. Description *string `json:"description,omitempty"` // Detectors An array of detector update objects. - Detectors []types.Detector `json:"detectors,omitempty"` + Detectors []types.DetectorUpdate `json:"detectors,omitempty"` // Groups A list of job groups. A job can belong to no groups or many. Groups []string `json:"groups,omitempty"` ModelPlotConfig *types.ModelPlotConfig `json:"model_plot_config,omitempty"` diff --git a/typedapi/ml/updatejob/response.go b/typedapi/ml/updatejob/response.go index 17266b75cd..28b3966168 100644 --- a/typedapi/ml/updatejob/response.go +++ b/typedapi/ml/updatejob/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatejob @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatejob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/update_job/MlUpdateJobResponse.ts#L29-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/update_job/MlUpdateJobResponse.ts#L29-L53 type Response struct { AllowLazyOpen bool `json:"allow_lazy_open"` AnalysisConfig types.AnalysisConfigRead `json:"analysis_config"` diff --git a/typedapi/ml/updatejob/update_job.go b/typedapi/ml/updatejob/update_job.go index eabd06d3a5..efb3fb8bb9 100644 --- a/typedapi/ml/updatejob/update_job.go +++ b/typedapi/ml/updatejob/update_job.go @@ -16,8 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Update an anomaly detection job. // Updates certain properties of an anomaly detection job. package updatejob @@ -81,6 +82,7 @@ func NewUpdateJobFunc(tp elastictransport.Interface) NewUpdateJob { } } +// Update an anomaly detection job. // Updates certain properties of an anomaly detection job. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html @@ -91,8 +93,6 @@ func New(tp elastictransport.Interface) *UpdateJob { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -361,7 +361,7 @@ func (r *UpdateJob) Pretty(pretty bool) *UpdateJob { return r } -// AllowLazyOpen Advanced configuration option. Specifies whether this job can open when +// Advanced configuration option. Specifies whether this job can open when // there is insufficient machine learning node capacity for it to be // immediately assigned to a node. If `false` and a machine learning node // with capacity to run the job cannot immediately be found, the open @@ -372,20 +372,29 @@ func (r *UpdateJob) Pretty(pretty bool) *UpdateJob { // machine learning node capacity is available. // API name: allow_lazy_open func (r *UpdateJob) AllowLazyOpen(allowlazyopen bool) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AllowLazyOpen = &allowlazyopen return r } // API name: analysis_limits -func (r *UpdateJob) AnalysisLimits(analysislimits *types.AnalysisMemoryLimit) *UpdateJob { +func (r *UpdateJob) AnalysisLimits(analysislimits types.AnalysisMemoryLimitVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalysisLimits = analysislimits + r.req.AnalysisLimits = analysislimits.AnalysisMemoryLimitCaster() return r } -// BackgroundPersistInterval Advanced configuration option. The time between each periodic persistence +// Advanced configuration option. The time between each periodic persistence // of the model. // The default value is a randomized value between 3 to 4 hours, which // avoids all jobs persisting at exactly the same time. 
The smallest allowed @@ -396,31 +405,64 @@ func (r *UpdateJob) AnalysisLimits(analysislimits *types.AnalysisMemoryLimit) *U // close the job, then reopen the job and restart the datafeed for the // changes to take effect. // API name: background_persist_interval -func (r *UpdateJob) BackgroundPersistInterval(duration types.Duration) *UpdateJob { - r.req.BackgroundPersistInterval = duration +func (r *UpdateJob) BackgroundPersistInterval(duration types.DurationVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.BackgroundPersistInterval = *duration.DurationCaster() return r } // API name: categorization_filters func (r *UpdateJob) CategorizationFilters(categorizationfilters ...string) *UpdateJob { - r.req.CategorizationFilters = categorizationfilters + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range categorizationfilters { + r.req.CategorizationFilters = append(r.req.CategorizationFilters, v) + + } return r } -// CustomSettings Advanced configuration option. Contains custom meta data about the job. +// Advanced configuration option. Contains custom meta data about the job. // For example, it can contain custom URL information as shown in Adding // custom URLs to machine learning results. 
// API name: custom_settings func (r *UpdateJob) CustomSettings(customsettings map[string]json.RawMessage) *UpdateJob { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.CustomSettings = customsettings + return r +} + +func (r *UpdateJob) AddCustomSetting(key string, value json.RawMessage) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.CustomSettings == nil { + r.req.CustomSettings = make(map[string]json.RawMessage) + } else { + tmp = r.req.CustomSettings + } + + tmp[key] = value + r.req.CustomSettings = tmp return r } -// DailyModelSnapshotRetentionAfterDays Advanced configuration option, which affects the automatic removal of old +// Advanced configuration option, which affects the automatic removal of old // model snapshots for this job. It specifies a period of time (in days) // after which only the first snapshot per day is retained. This period is // relative to the timestamp of the most recent snapshot for this job. Valid @@ -429,84 +471,127 @@ func (r *UpdateJob) CustomSettings(customsettings map[string]json.RawMessage) *U // `model_snapshot_retention_days`. // API name: daily_model_snapshot_retention_after_days func (r *UpdateJob) DailyModelSnapshotRetentionAfterDays(dailymodelsnapshotretentionafterdays int64) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.DailyModelSnapshotRetentionAfterDays = &dailymodelsnapshotretentionafterdays return r } -// Description A description of the job. +// A description of the job. 
// API name: description func (r *UpdateJob) Description(description string) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Detectors An array of detector update objects. +// An array of detector update objects. // API name: detectors -func (r *UpdateJob) Detectors(detectors ...types.Detector) *UpdateJob { - r.req.Detectors = detectors +func (r *UpdateJob) Detectors(detectors ...types.DetectorUpdateVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range detectors { + r.req.Detectors = append(r.req.Detectors, *v.DetectorUpdateCaster()) + + } return r } -// Groups A list of job groups. A job can belong to no groups or many. +// A list of job groups. A job can belong to no groups or many. // API name: groups func (r *UpdateJob) Groups(groups ...string) *UpdateJob { - r.req.Groups = groups + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range groups { + + r.req.Groups = append(r.req.Groups, v) + } return r } // API name: model_plot_config -func (r *UpdateJob) ModelPlotConfig(modelplotconfig *types.ModelPlotConfig) *UpdateJob { +func (r *UpdateJob) ModelPlotConfig(modelplotconfig types.ModelPlotConfigVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ModelPlotConfig = modelplotconfig + r.req.ModelPlotConfig = modelplotconfig.ModelPlotConfigCaster() return r } // API name: model_prune_window -func (r *UpdateJob) ModelPruneWindow(duration types.Duration) *UpdateJob { - r.req.ModelPruneWindow = duration +func (r *UpdateJob) ModelPruneWindow(duration types.DurationVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + 
r.req.ModelPruneWindow = *duration.DurationCaster() return r } -// ModelSnapshotRetentionDays Advanced configuration option, which affects the automatic removal of old +// Advanced configuration option, which affects the automatic removal of old // model snapshots for this job. It specifies the maximum period of time (in // days) that snapshots are retained. This period is relative to the // timestamp of the most recent snapshot for this job. // API name: model_snapshot_retention_days func (r *UpdateJob) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ModelSnapshotRetentionDays = &modelsnapshotretentiondays return r } -// PerPartitionCategorization Settings related to how categorization interacts with partition fields. +// Settings related to how categorization interacts with partition fields. // API name: per_partition_categorization -func (r *UpdateJob) PerPartitionCategorization(perpartitioncategorization *types.PerPartitionCategorization) *UpdateJob { +func (r *UpdateJob) PerPartitionCategorization(perpartitioncategorization types.PerPartitionCategorizationVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.PerPartitionCategorization = perpartitioncategorization + r.req.PerPartitionCategorization = perpartitioncategorization.PerPartitionCategorizationCaster() return r } -// RenormalizationWindowDays Advanced configuration option. The period over which adjustments to the +// Advanced configuration option. The period over which adjustments to the // score are applied, as new data is seen. 
// API name: renormalization_window_days func (r *UpdateJob) RenormalizationWindowDays(renormalizationwindowdays int64) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RenormalizationWindowDays = &renormalizationwindowdays return r } -// ResultsRetentionDays Advanced configuration option. The period of time (in days) that results +// Advanced configuration option. The period of time (in days) that results // are retained. Age is calculated relative to the timestamp of the latest // bucket result. If this property has a non-null value, once per day at // 00:30 (server time), results that are the specified number of days older @@ -514,6 +599,10 @@ func (r *UpdateJob) RenormalizationWindowDays(renormalizationwindowdays int64) * // value is null, which means all results are retained. // API name: results_retention_days func (r *UpdateJob) ResultsRetentionDays(resultsretentiondays int64) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ResultsRetentionDays = &resultsretentiondays diff --git a/typedapi/ml/updatemodelsnapshot/request.go b/typedapi/ml/updatemodelsnapshot/request.go index 0d0b80e131..2ca31c17b5 100644 --- a/typedapi/ml/updatemodelsnapshot/request.go +++ b/typedapi/ml/updatemodelsnapshot/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatemodelsnapshot @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updatemodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/update_model_snapshot/MlUpdateModelSnapshotRequest.ts#L23-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/update_model_snapshot/MlUpdateModelSnapshotRequest.ts#L23-L63 type Request struct { // Description A description of the model snapshot. diff --git a/typedapi/ml/updatemodelsnapshot/response.go b/typedapi/ml/updatemodelsnapshot/response.go index b2406f0db5..2610b92125 100644 --- a/typedapi/ml/updatemodelsnapshot/response.go +++ b/typedapi/ml/updatemodelsnapshot/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatemodelsnapshot @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatemodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/update_model_snapshot/MlUpdateModelSnapshotResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/update_model_snapshot/MlUpdateModelSnapshotResponse.ts#L22-L27 type Response struct { Acknowledged bool `json:"acknowledged"` Model types.ModelSnapshot `json:"model"` diff --git a/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go b/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go index 9b41beec43..03bb71fd6c 100644 --- a/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go +++ b/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go @@ -16,8 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Update a snapshot. // Updates certain properties of a snapshot. package updatemodelsnapshot @@ -86,6 +87,7 @@ func NewUpdateModelSnapshotFunc(tp elastictransport.Interface) NewUpdateModelSna } } +// Update a snapshot. // Updates certain properties of a snapshot. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html @@ -96,8 +98,6 @@ func New(tp elastictransport.Interface) *UpdateModelSnapshot { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -383,20 +383,29 @@ func (r *UpdateModelSnapshot) Pretty(pretty bool) *UpdateModelSnapshot { return r } -// Description A description of the model snapshot. +// A description of the model snapshot. // API name: description func (r *UpdateModelSnapshot) Description(description string) *UpdateModelSnapshot { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Retain If `true`, this snapshot will not be deleted during automatic cleanup of +// If `true`, this snapshot will not be deleted during automatic cleanup of // snapshots older than `model_snapshot_retention_days`. However, this // snapshot will be deleted when the job is deleted. // API name: retain func (r *UpdateModelSnapshot) Retain(retain bool) *UpdateModelSnapshot { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Retain = &retain return r diff --git a/typedapi/ml/updatetrainedmodeldeployment/request.go b/typedapi/ml/updatetrainedmodeldeployment/request.go index 342fbdd589..be03a4e1af 100644 --- a/typedapi/ml/updatetrainedmodeldeployment/request.go +++ b/typedapi/ml/updatetrainedmodeldeployment/request.go @@ -16,20 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatetrainedmodeldeployment import ( "encoding/json" "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Request holds the request body struct for the package updatetrainedmodeldeployment // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/update_trained_model_deployment/MlUpdateTrainedModelDeploymentRequest.ts#L24-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/update_trained_model_deployment/MlUpdateTrainedModelDeploymentRequest.ts#L25-L78 type Request struct { + // AdaptiveAllocations Adaptive allocations configuration. When enabled, the number of allocations + // is set based on the current load. + // If adaptive_allocations is enabled, do not set the number of allocations + // manually. + AdaptiveAllocations *types.AdaptiveAllocationsSettings `json:"adaptive_allocations,omitempty"` // NumberOfAllocations The number of model allocations on each node where the model is deployed. // All allocations on a node share the same copy of the model in memory but use // a separate set of threads to evaluate the model. @@ -37,6 +44,8 @@ type Request struct { // If this setting is greater than the number of hardware threads // it will automatically be changed to a value less than the number of hardware // threads. + // If adaptive_allocations is enabled, do not set this value, because it’s + // automatically set. 
NumberOfAllocations *int `json:"number_of_allocations,omitempty"` } diff --git a/typedapi/ml/updatetrainedmodeldeployment/response.go b/typedapi/ml/updatetrainedmodeldeployment/response.go index 3b09b9d55c..f7b1f3241a 100644 --- a/typedapi/ml/updatetrainedmodeldeployment/response.go +++ b/typedapi/ml/updatetrainedmodeldeployment/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatetrainedmodeldeployment @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatetrainedmodeldeployment // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/update_trained_model_deployment/MlUpdateTrainedModelDeploymentResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/update_trained_model_deployment/MlUpdateTrainedModelDeploymentResponse.ts#L22-L26 type Response struct { Assignment types.TrainedModelAssignment `json:"assignment"` } diff --git a/typedapi/ml/updatetrainedmodeldeployment/update_trained_model_deployment.go b/typedapi/ml/updatetrainedmodeldeployment/update_trained_model_deployment.go index 3c71230a36..a539ede8f6 100644 --- a/typedapi/ml/updatetrainedmodeldeployment/update_trained_model_deployment.go +++ b/typedapi/ml/updatetrainedmodeldeployment/update_trained_model_deployment.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Starts a trained model deployment, which allocates the model to every machine -// learning node. +// Update a trained model deployment. package updatetrainedmodeldeployment import ( @@ -82,8 +81,7 @@ func NewUpdateTrainedModelDeploymentFunc(tp elastictransport.Interface) NewUpdat } } -// Starts a trained model deployment, which allocates the model to every machine -// learning node. +// Update a trained model deployment. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-trained-model-deployment.html func New(tp elastictransport.Interface) *UpdateTrainedModelDeployment { @@ -93,8 +91,6 @@ func New(tp elastictransport.Interface) *UpdateTrainedModelDeployment { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -366,15 +362,38 @@ func (r *UpdateTrainedModelDeployment) Pretty(pretty bool) *UpdateTrainedModelDe return r } -// NumberOfAllocations The number of model allocations on each node where the model is deployed. +// Adaptive allocations configuration. When enabled, the number of allocations +// is set based on the current load. +// If adaptive_allocations is enabled, do not set the number of allocations +// manually. +// API name: adaptive_allocations +func (r *UpdateTrainedModelDeployment) AdaptiveAllocations(adaptiveallocations types.AdaptiveAllocationsSettingsVariant) *UpdateTrainedModelDeployment { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AdaptiveAllocations = adaptiveallocations.AdaptiveAllocationsSettingsCaster() + + return r +} + +// The number of model allocations on each node where the model is deployed. 
// All allocations on a node share the same copy of the model in memory but use // a separate set of threads to evaluate the model. // Increasing this value generally increases the throughput. // If this setting is greater than the number of hardware threads // it will automatically be changed to a value less than the number of hardware // threads. +// If adaptive_allocations is enabled, do not set this value, because it’s +// automatically set. // API name: number_of_allocations func (r *UpdateTrainedModelDeployment) NumberOfAllocations(numberofallocations int) *UpdateTrainedModelDeployment { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.NumberOfAllocations = &numberofallocations return r diff --git a/typedapi/ml/upgradejobsnapshot/response.go b/typedapi/ml/upgradejobsnapshot/response.go index 64bfbd256a..19192776de 100644 --- a/typedapi/ml/upgradejobsnapshot/response.go +++ b/typedapi/ml/upgradejobsnapshot/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package upgradejobsnapshot // Response holds the response body struct for the package upgradejobsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/upgrade_job_snapshot/MlUpgradeJobSnapshotResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/upgrade_job_snapshot/MlUpgradeJobSnapshotResponse.ts#L22-L31 type Response struct { // Completed When true, this means the task is complete. When false, it is still running. 
diff --git a/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go b/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go index a906d15937..eaff75944a 100644 --- a/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go +++ b/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Upgrades an anomaly detection model snapshot to the latest major version. +// Upgrade a snapshot. +// Upgrade an anomaly detection model snapshot to the latest major version. // Over time, older snapshot formats are deprecated and removed. Anomaly // detection jobs support only snapshots that are from the current or previous // major version. @@ -89,7 +90,8 @@ func NewUpgradeJobSnapshotFunc(tp elastictransport.Interface) NewUpgradeJobSnaps } } -// Upgrades an anomaly detection model snapshot to the latest major version. +// Upgrade a snapshot. +// Upgrade an anomaly detection model snapshot to the latest major version. // Over time, older snapshot formats are deprecated and removed. Anomaly // detection jobs support only snapshots that are from the current or previous // major version. diff --git a/typedapi/ml/validate/request.go b/typedapi/ml/validate/request.go index 0a6d5617c9..24a3cfba0a 100644 --- a/typedapi/ml/validate/request.go +++ b/typedapi/ml/validate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package validate @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package validate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/validate/MlValidateJobRequest.ts#L27-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/validate/MlValidateJobRequest.ts#L27-L52 type Request struct { AnalysisConfig *types.AnalysisConfig `json:"analysis_config,omitempty"` AnalysisLimits *types.AnalysisLimits `json:"analysis_limits,omitempty"` diff --git a/typedapi/ml/validate/response.go b/typedapi/ml/validate/response.go index f93088f998..5ff5ea70de 100644 --- a/typedapi/ml/validate/response.go +++ b/typedapi/ml/validate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package validate // Response holds the response body struct for the package validate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/validate/MlValidateJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/validate/MlValidateJobResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/ml/validate/validate.go b/typedapi/ml/validate/validate.go index b034732d15..bef2730c9e 100644 --- a/typedapi/ml/validate/validate.go +++ b/typedapi/ml/validate/validate.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Validates an anomaly detection job. +// Validate an anomaly detection job. package validate import ( @@ -73,7 +73,7 @@ func NewValidateFunc(tp elastictransport.Interface) NewValidate { } } -// Validates an anomaly detection job. +// Validate an anomaly detection job. // // https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html func New(tp elastictransport.Interface) *Validate { @@ -83,8 +83,6 @@ func New(tp elastictransport.Interface) *Validate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -339,31 +337,47 @@ func (r *Validate) Pretty(pretty bool) *Validate { } // API name: analysis_config -func (r *Validate) AnalysisConfig(analysisconfig *types.AnalysisConfig) *Validate { +func (r *Validate) AnalysisConfig(analysisconfig types.AnalysisConfigVariant) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalysisConfig = analysisconfig + r.req.AnalysisConfig = analysisconfig.AnalysisConfigCaster() return r } // API name: analysis_limits -func (r *Validate) AnalysisLimits(analysislimits *types.AnalysisLimits) *Validate { +func (r *Validate) AnalysisLimits(analysislimits types.AnalysisLimitsVariant) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.AnalysisLimits = 
analysislimits + r.req.AnalysisLimits = analysislimits.AnalysisLimitsCaster() return r } // API name: data_description -func (r *Validate) DataDescription(datadescription *types.DataDescription) *Validate { +func (r *Validate) DataDescription(datadescription types.DataDescriptionVariant) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.DataDescription = datadescription + r.req.DataDescription = datadescription.DataDescriptionCaster() return r } // API name: description func (r *Validate) Description(description string) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description @@ -372,21 +386,35 @@ func (r *Validate) Description(description string) *Validate { // API name: job_id func (r *Validate) JobId(id string) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.JobId = &id return r } // API name: model_plot -func (r *Validate) ModelPlot(modelplot *types.ModelPlotConfig) *Validate { +func (r *Validate) ModelPlot(modelplot types.ModelPlotConfigVariant) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ModelPlot = modelplot + r.req.ModelPlot = modelplot.ModelPlotConfigCaster() return r } // API name: model_snapshot_id func (r *Validate) ModelSnapshotId(id string) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ModelSnapshotId = &id return r @@ -394,6 +422,10 @@ func (r *Validate) ModelSnapshotId(id string) *Validate { // API name: model_snapshot_retention_days func (r *Validate) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } 
r.req.ModelSnapshotRetentionDays = &modelsnapshotretentiondays @@ -402,6 +434,11 @@ func (r *Validate) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) // API name: results_index_name func (r *Validate) ResultsIndexName(indexname string) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ResultsIndexName = &indexname return r diff --git a/typedapi/ml/validatedetector/request.go b/typedapi/ml/validatedetector/request.go index 1150405e42..a1318cf44f 100644 --- a/typedapi/ml/validatedetector/request.go +++ b/typedapi/ml/validatedetector/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package validatedetector @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package validatedetector // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/validate_detector/MlValidateDetectorRequest.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/validate_detector/MlValidateDetectorRequest.ts#L23-L40 type Request = types.Detector // NewRequest returns a Request diff --git a/typedapi/ml/validatedetector/response.go b/typedapi/ml/validatedetector/response.go index c3158e2400..85dcd15873 100644 --- a/typedapi/ml/validatedetector/response.go +++ b/typedapi/ml/validatedetector/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package validatedetector // Response holds the response body struct for the package validatedetector // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/validate_detector/MlValidateDetectorResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/validate_detector/MlValidateDetectorResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/ml/validatedetector/validate_detector.go b/typedapi/ml/validatedetector/validate_detector.go index 0051310506..05b60d9994 100644 --- a/typedapi/ml/validatedetector/validate_detector.go +++ b/typedapi/ml/validatedetector/validate_detector.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Validates an anomaly detection detector. +// Validate an anomaly detection job. package validatedetector import ( @@ -74,9 +74,9 @@ func NewValidateDetectorFunc(tp elastictransport.Interface) NewValidateDetector } } -// Validates an anomaly detection detector. +// Validate an anomaly detection job. 
// -// https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html +// https://www.elastic.co/docs/api/doc/elasticsearch/v8 func New(tp elastictransport.Interface) *ValidateDetector { r := &ValidateDetector{ transport: tp, @@ -84,8 +84,6 @@ func New(tp elastictransport.Interface) *ValidateDetector { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -341,99 +339,147 @@ func (r *ValidateDetector) Pretty(pretty bool) *ValidateDetector { return r } -// ByFieldName The field used to split the data. In particular, this property is used for +// The field used to split the data. In particular, this property is used for // analyzing the splits with respect to their own history. It is used for // finding unusual values in the context of the split. // API name: by_field_name func (r *ValidateDetector) ByFieldName(field string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ByFieldName = &field return r } -// CustomRules Custom rules enable you to customize the way detectors operate. For example, +// Custom rules enable you to customize the way detectors operate. For example, // a rule may dictate conditions under which results should be skipped. Kibana // refers to custom rules as job rules. // API name: custom_rules -func (r *ValidateDetector) CustomRules(customrules ...types.DetectionRule) *ValidateDetector { - r.req.CustomRules = customrules +func (r *ValidateDetector) CustomRules(customrules ...types.DetectionRuleVariant) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range customrules { + + r.req.CustomRules = append(r.req.CustomRules, *v.DetectionRuleCaster()) + } return r } -// DetectorDescription A description of the detector. +// A description of the detector. 
// API name: detector_description func (r *ValidateDetector) DetectorDescription(detectordescription string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.DetectorDescription = &detectordescription return r } -// DetectorIndex A unique identifier for the detector. This identifier is based on the order +// A unique identifier for the detector. This identifier is based on the order // of the detectors in the `analysis_config`, starting at zero. If you specify a // value for this property, it is ignored. // API name: detector_index func (r *ValidateDetector) DetectorIndex(detectorindex int) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.DetectorIndex = &detectorindex return r } -// ExcludeFrequent If set, frequent entities are excluded from influencing the anomaly results. +// If set, frequent entities are excluded from influencing the anomaly results. // Entities can be considered frequent over time or frequent in a population. If // you are working with both over and by fields, you can set `exclude_frequent` // to `all` for both fields, or to `by` or `over` for those specific fields. // API name: exclude_frequent func (r *ValidateDetector) ExcludeFrequent(excludefrequent excludefrequent.ExcludeFrequent) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ExcludeFrequent = &excludefrequent - return r } -// FieldName The field that the detector uses in the function. If you use an event rate +// The field that the detector uses in the function. If you use an event rate // function such as count or rare, do not specify this field. The `field_name` // cannot contain double quotes or backslashes. 
// API name: field_name func (r *ValidateDetector) FieldName(field string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FieldName = &field return r } -// Function The analysis function that is used. For example, `count`, `rare`, `mean`, +// The analysis function that is used. For example, `count`, `rare`, `mean`, // `min`, `max`, or `sum`. // API name: function func (r *ValidateDetector) Function(function string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Function = &function return r } -// OverFieldName The field used to split the data. In particular, this property is used for +// The field used to split the data. In particular, this property is used for // analyzing the splits with respect to the history of all splits. It is used // for finding unusual values in the population of all splits. // API name: over_field_name func (r *ValidateDetector) OverFieldName(field string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.OverFieldName = &field return r } -// PartitionFieldName The field used to segment the analysis. When you use this property, you have +// The field used to segment the analysis. When you use this property, you have // completely independent baselines for each value of this field. // API name: partition_field_name func (r *ValidateDetector) PartitionFieldName(field string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.PartitionFieldName = &field return r } -// UseNull Defines whether a new series is used as the null series when there is no +// Defines whether a new series is used as the null series when there is no // value for the by or partition fields. 
// API name: use_null func (r *ValidateDetector) UseNull(usenull bool) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.UseNull = &usenull return r diff --git a/typedapi/monitoring/bulk/bulk.go b/typedapi/monitoring/bulk/bulk.go index 95f3cd2d4c..e35615b4de 100644 --- a/typedapi/monitoring/bulk/bulk.go +++ b/typedapi/monitoring/bulk/bulk.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Used by the monitoring features to send monitoring data. +// Send monitoring data. +// This API is used by the monitoring features to send monitoring data. package bulk import ( @@ -79,9 +80,10 @@ func NewBulkFunc(tp elastictransport.Interface) NewBulk { } } -// Used by the monitoring features to send monitoring data. +// Send monitoring data. +// This API is used by the monitoring features to send monitoring data. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/monitor-elasticsearch-cluster.html +// https://www.elastic.co/docs/api/doc/elasticsearch/v8 func New(tp elastictransport.Interface) *Bulk { r := &Bulk{ transport: tp, diff --git a/typedapi/monitoring/bulk/request.go b/typedapi/monitoring/bulk/request.go index 37f6d0a7f9..8c2474ddca 100644 --- a/typedapi/monitoring/bulk/request.go +++ b/typedapi/monitoring/bulk/request.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package bulk // Request holds the request body struct for the package bulk // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/monitoring/bulk/BulkMonitoringRequest.ts#L24-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/monitoring/bulk/BulkMonitoringRequest.ts#L24-L73 type Request = []any diff --git a/typedapi/monitoring/bulk/response.go b/typedapi/monitoring/bulk/response.go index f23bb9b72c..2f8753fbfa 100644 --- a/typedapi/monitoring/bulk/response.go +++ b/typedapi/monitoring/bulk/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package bulk @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package bulk // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/monitoring/bulk/BulkMonitoringResponse.ts#L23-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/monitoring/bulk/BulkMonitoringResponse.ts#L23-L32 type Response struct { Error *types.ErrorCause `json:"error,omitempty"` // Errors True if there is was an error diff --git a/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go b/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go index d678ed55da..6792886fd7 100644 --- 
a/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go +++ b/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// You can use this API to clear the archived repositories metering information -// in the cluster. +// Clear the archived repositories metering. +// Clear the archived repositories metering information in the cluster. package clearrepositoriesmeteringarchive import ( @@ -82,8 +82,8 @@ func NewClearRepositoriesMeteringArchiveFunc(tp elastictransport.Interface) NewC } } -// You can use this API to clear the archived repositories metering information -// in the cluster. +// Clear the archived repositories metering. +// Clear the archived repositories metering information in the cluster. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-repositories-metering-archive-api.html func New(tp elastictransport.Interface) *ClearRepositoriesMeteringArchive { @@ -304,8 +304,6 @@ func (r *ClearRepositoriesMeteringArchive) Header(key, value string) *ClearRepos } // NodeId Comma-separated list of node IDs or names used to limit returned information. -// All the nodes selective options are explained -// [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). 
// API Name: nodeid func (r *ClearRepositoriesMeteringArchive) _nodeid(nodeid string) *ClearRepositoriesMeteringArchive { r.paramSet |= nodeidMask @@ -314,9 +312,7 @@ func (r *ClearRepositoriesMeteringArchive) _nodeid(nodeid string) *ClearReposito return r } -// MaxArchiveVersion Specifies the maximum -// [archive_version](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html#get-repositories-metering-api-response-body) -// to be cleared from the archive. +// MaxArchiveVersion Specifies the maximum `archive_version` to be cleared from the archive. // API Name: maxarchiveversion func (r *ClearRepositoriesMeteringArchive) _maxarchiveversion(maxarchiveversion string) *ClearRepositoriesMeteringArchive { r.paramSet |= maxarchiveversionMask diff --git a/typedapi/nodes/clearrepositoriesmeteringarchive/response.go b/typedapi/nodes/clearrepositoriesmeteringarchive/response.go index ad036491ba..6aa4b11187 100644 --- a/typedapi/nodes/clearrepositoriesmeteringarchive/response.go +++ b/typedapi/nodes/clearrepositoriesmeteringarchive/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clearrepositoriesmeteringarchive @@ -26,11 +26,10 @@ import ( // Response holds the response body struct for the package clearrepositoriesmeteringarchive // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveResponse.ts#L36-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveResponse.ts#L37-L39 type Response struct { - // ClusterName Name of the cluster. Based on the [Cluster name - // setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). + // ClusterName Name of the cluster. Based on the `cluster.name` setting. ClusterName string `json:"cluster_name"` // NodeStats Contains statistics about the number of nodes selected by the request’s node // filters. diff --git a/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go b/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go index e682ba9b72..57a6ef1f2c 100644 --- a/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go +++ b/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go @@ -16,15 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// You can use the cluster repositories metering API to retrieve repositories -// metering information in a cluster. 
-// This API exposes monotonically non-decreasing counters and it’s expected that -// clients would durably store the -// information needed to compute aggregations over a period of time. -// Additionally, the information exposed by this -// API is volatile, meaning that it won’t be present after node restarts. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Get cluster repositories metering. +// Get repositories metering information for a cluster. +// This API exposes monotonically non-decreasing counters and it is expected +// that clients would durably store the information needed to compute +// aggregations over a period of time. +// Additionally, the information exposed by this API is volatile, meaning that +// it will not be present after node restarts. package getrepositoriesmeteringinfo import ( @@ -82,13 +82,13 @@ func NewGetRepositoriesMeteringInfoFunc(tp elastictransport.Interface) NewGetRep } } -// You can use the cluster repositories metering API to retrieve repositories -// metering information in a cluster. -// This API exposes monotonically non-decreasing counters and it’s expected that -// clients would durably store the -// information needed to compute aggregations over a period of time. -// Additionally, the information exposed by this -// API is volatile, meaning that it won’t be present after node restarts. +// Get cluster repositories metering. +// Get repositories metering information for a cluster. +// This API exposes monotonically non-decreasing counters and it is expected +// that clients would durably store the information needed to compute +// aggregations over a period of time. +// Additionally, the information exposed by this API is volatile, meaning that +// it will not be present after node restarts. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html func New(tp elastictransport.Interface) *GetRepositoriesMeteringInfo { diff --git a/typedapi/nodes/getrepositoriesmeteringinfo/response.go b/typedapi/nodes/getrepositoriesmeteringinfo/response.go index c0731a6054..dbd665c50a 100644 --- a/typedapi/nodes/getrepositoriesmeteringinfo/response.go +++ b/typedapi/nodes/getrepositoriesmeteringinfo/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getrepositoriesmeteringinfo @@ -26,11 +26,10 @@ import ( // Response holds the response body struct for the package getrepositoriesmeteringinfo // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoResponse.ts#L36-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoResponse.ts#L36-L38 type Response struct { - // ClusterName Name of the cluster. Based on the [Cluster name - // setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). + // ClusterName Name of the cluster. Based on the `cluster.name` setting. ClusterName string `json:"cluster_name"` // NodeStats Contains statistics about the number of nodes selected by the request’s node // filters. 
diff --git a/typedapi/nodes/hotthreads/hot_threads.go b/typedapi/nodes/hotthreads/hot_threads.go index 443c2fa7f3..883f38a8eb 100644 --- a/typedapi/nodes/hotthreads/hot_threads.go +++ b/typedapi/nodes/hotthreads/hot_threads.go @@ -16,11 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// This API yields a breakdown of the hot threads on each selected node in the -// cluster. -// The output is plain text with a breakdown of each node’s top hot threads. +// Get the hot threads for nodes. +// Get a breakdown of the hot threads on each selected node in the cluster. +// The output is plain text with a breakdown of the top hot threads for each +// node. package hotthreads import ( @@ -77,9 +78,10 @@ func NewHotThreadsFunc(tp elastictransport.Interface) NewHotThreads { } } -// This API yields a breakdown of the hot threads on each selected node in the -// cluster. -// The output is plain text with a breakdown of each node’s top hot threads. +// Get the hot threads for nodes. +// Get a breakdown of the hot threads on each selected node in the cluster. +// The output is plain text with a breakdown of the top hot threads for each +// node. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-hot-threads.html func New(tp elastictransport.Interface) *HotThreads { @@ -334,16 +336,6 @@ func (r *HotThreads) Snapshots(snapshots string) *HotThreads { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response -// is received before the timeout expires, the request fails and -// returns an error. 
-// API name: master_timeout -func (r *HotThreads) MasterTimeout(duration string) *HotThreads { - r.values.Set("master_timeout", duration) - - return r -} - // Threads Specifies the number of hot threads to provide information for. // API name: threads func (r *HotThreads) Threads(threads string) *HotThreads { diff --git a/typedapi/nodes/hotthreads/response.go b/typedapi/nodes/hotthreads/response.go index 65d45e7fd8..7200401629 100644 --- a/typedapi/nodes/hotthreads/response.go +++ b/typedapi/nodes/hotthreads/response.go @@ -16,19 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package hotthreads -import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types" -) - // Response holds the response body struct for the package hotthreads // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/hot_threads/NodesHotThreadsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/hot_threads/NodesHotThreadsResponse.ts#L20-L22 type Response struct { - HotThreads []types.HotThread `json:"hot_threads"` } // NewResponse returns a Response diff --git a/typedapi/nodes/info/info.go b/typedapi/nodes/info/info.go index 464be17942..82c1630945 100644 --- a/typedapi/nodes/info/info.go +++ b/typedapi/nodes/info/info.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns cluster nodes information. 
+// Get node information. +// +// By default, the API returns all attributes and core settings for cluster +// nodes. package info import ( @@ -77,7 +80,10 @@ func NewInfoFunc(tp elastictransport.Interface) NewInfo { } } -// Returns cluster nodes information. +// Get node information. +// +// By default, the API returns all attributes and core settings for cluster +// nodes. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-info.html func New(tp elastictransport.Interface) *Info { @@ -349,15 +355,6 @@ func (r *Info) FlatSettings(flatsettings bool) *Info { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. -// API name: master_timeout -func (r *Info) MasterTimeout(duration string) *Info { - r.values.Set("master_timeout", duration) - - return r -} - // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout diff --git a/typedapi/nodes/info/response.go b/typedapi/nodes/info/response.go index 32872d66e0..20205db524 100644 --- a/typedapi/nodes/info/response.go +++ b/typedapi/nodes/info/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package info @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/NodesInfoResponse.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/NodesInfoResponse.ts#L30-L32 type Response struct { ClusterName string `json:"cluster_name"` // NodeStats Contains statistics about the number of nodes selected by the request’s node diff --git a/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go b/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go index c594e0a5a4..b6e42bf4cc 100644 --- a/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go +++ b/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go @@ -16,9 +16,26 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Reloads the keystore on nodes in the cluster. +// Reload the keystore on nodes in the cluster. +// +// Secure settings are stored in an on-disk keystore. Certain of these settings +// are reloadable. +// That is, you can change them on disk and reload them without restarting any +// nodes in the cluster. +// When you have updated reloadable secure settings in your keystore, you can +// use this API to reload those settings on each node. 
+// +// When the Elasticsearch keystore is password protected and not simply +// obfuscated, you must provide the password for the keystore when you reload +// the secure settings. +// Reloading the settings for the whole cluster assumes that the keystores for +// all nodes are protected with the same password; this method is allowed only +// when inter-node communications are encrypted. +// Alternatively, you can reload the secure settings on each node by locally +// accessing the API and passing the node-specific Elasticsearch keystore +// password. package reloadsecuresettings import ( @@ -79,9 +96,26 @@ func NewReloadSecureSettingsFunc(tp elastictransport.Interface) NewReloadSecureS } } -// Reloads the keystore on nodes in the cluster. +// Reload the keystore on nodes in the cluster. +// +// Secure settings are stored in an on-disk keystore. Certain of these settings +// are reloadable. +// That is, you can change them on disk and reload them without restarting any +// nodes in the cluster. +// When you have updated reloadable secure settings in your keystore, you can +// use this API to reload those settings on each node. +// +// When the Elasticsearch keystore is password protected and not simply +// obfuscated, you must provide the password for the keystore when you reload +// the secure settings. +// Reloading the settings for the whole cluster assumes that the keystores for +// all nodes are protected with the same password; this method is allowed only +// when inter-node communications are encrypted. +// Alternatively, you can reload the secure settings on each node by locally +// accessing the API and passing the node-specific Elasticsearch keystore +// password. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-settings.html#reloadable-secure-settings +// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-reload-secure-settings.html func New(tp elastictransport.Interface) *ReloadSecureSettings { r := &ReloadSecureSettings{ transport: tp, @@ -89,8 +123,6 @@ func New(tp elastictransport.Interface) *ReloadSecureSettings { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -374,9 +406,14 @@ func (r *ReloadSecureSettings) Pretty(pretty bool) *ReloadSecureSettings { return r } -// SecureSettingsPassword The password for the Elasticsearch keystore. +// The password for the Elasticsearch keystore. // API name: secure_settings_password func (r *ReloadSecureSettings) SecureSettingsPassword(password string) *ReloadSecureSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.SecureSettingsPassword = &password return r diff --git a/typedapi/nodes/reloadsecuresettings/request.go b/typedapi/nodes/reloadsecuresettings/request.go index 6b4f9fed55..90aaac7026 100644 --- a/typedapi/nodes/reloadsecuresettings/request.go +++ b/typedapi/nodes/reloadsecuresettings/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package reloadsecuresettings @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package reloadsecuresettings // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/reload_secure_settings/ReloadSecureSettingsRequest.ts#L24-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/reload_secure_settings/ReloadSecureSettingsRequest.ts#L24-L70 type Request struct { // SecureSettingsPassword The password for the Elasticsearch keystore. diff --git a/typedapi/nodes/reloadsecuresettings/response.go b/typedapi/nodes/reloadsecuresettings/response.go index d998d922c7..b806226360 100644 --- a/typedapi/nodes/reloadsecuresettings/response.go +++ b/typedapi/nodes/reloadsecuresettings/response.go @@ -16,23 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package reloadsecuresettings import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Response holds the response body struct for the package reloadsecuresettings // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/reload_secure_settings/ReloadSecureSettingsResponse.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/reload_secure_settings/ReloadSecureSettingsResponse.ts#L30-L32 type Response struct { ClusterName string `json:"cluster_name"` // NodeStats Contains statistics about the number of nodes selected by the request’s node @@ -48,40 +42,3 @@ func NewResponse() *Response { } return r } - -func (s *Response) UnmarshalJSON(data []byte) error { - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "cluster_name": - if err := dec.Decode(&s.ClusterName); err != nil { - return fmt.Errorf("%s | %w", "ClusterName", err) - } - - case "_nodes": - if err := dec.Decode(&s.NodeStats); err != nil { - return fmt.Errorf("%s | %w", "NodeStats", err) - } - - case "nodes": - if s.Nodes == nil { - s.Nodes = make(map[string]types.NodeReloadResult, 0) - } - if err := dec.Decode(&s.Nodes); err != nil { - return fmt.Errorf("%s | %w", "Nodes", err) - } - - } - } - return nil -} diff --git a/typedapi/nodes/stats/response.go b/typedapi/nodes/stats/response.go index 83bac926ae..efbc5223df 100644 --- a/typedapi/nodes/stats/response.go +++ b/typedapi/nodes/stats/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/stats/NodesStatsResponse.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/stats/NodesStatsResponse.ts#L30-L32 type Response struct { ClusterName *string `json:"cluster_name,omitempty"` // NodeStats Contains statistics about the number of nodes selected by the request’s node diff --git a/typedapi/nodes/stats/stats.go b/typedapi/nodes/stats/stats.go index 286ba59bc6..652dc71859 100644 --- a/typedapi/nodes/stats/stats.go +++ b/typedapi/nodes/stats/stats.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns cluster nodes statistics. +// Get node statistics. +// Get statistics for nodes in a cluster. +// By default, all stats are returned. You can limit the returned information by +// using metrics. package stats import ( @@ -81,7 +84,10 @@ func NewStatsFunc(tp elastictransport.Interface) NewStats { } } -// Returns cluster nodes statistics. +// Get node statistics. +// Get statistics for nodes in a cluster. +// By default, all stats are returned. You can limit the returned information by +// using metrics. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html func New(tp elastictransport.Interface) *Stats { @@ -459,15 +465,6 @@ func (r *Stats) Level(level level.Level) *Stats { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. -// API name: master_timeout -func (r *Stats) MasterTimeout(duration string) *Stats { - r.values.Set("master_timeout", duration) - - return r -} - // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout diff --git a/typedapi/nodes/usage/response.go b/typedapi/nodes/usage/response.go index b0bb12560b..d781eb3bd2 100644 --- a/typedapi/nodes/usage/response.go +++ b/typedapi/nodes/usage/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package usage @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package usage // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/usage/NodesUsageResponse.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/usage/NodesUsageResponse.ts#L30-L32 type Response struct { ClusterName string `json:"cluster_name"` // NodeStats Contains statistics about the number of nodes selected by the request’s node diff --git a/typedapi/nodes/usage/usage.go b/typedapi/nodes/usage/usage.go index 8caa120486..db299ff295 100644 --- a/typedapi/nodes/usage/usage.go +++ b/typedapi/nodes/usage/usage.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information on the usage of features. +// Get feature usage information. package usage import ( @@ -77,7 +77,7 @@ func NewUsageFunc(tp elastictransport.Interface) NewUsage { } } -// Returns information on the usage of features. +// Get feature usage information. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-usage.html func New(tp elastictransport.Interface) *Usage { diff --git a/typedapi/profiling/flamegraph/flamegraph.go b/typedapi/profiling/flamegraph/flamegraph.go index 9255d60081..331f9d3aa8 100644 --- a/typedapi/profiling/flamegraph/flamegraph.go +++ b/typedapi/profiling/flamegraph/flamegraph.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Extracts a UI-optimized structure to render flamegraphs from Universal // Profiling. diff --git a/typedapi/profiling/stacktraces/stacktraces.go b/typedapi/profiling/stacktraces/stacktraces.go index 2f5a52b7ca..940f4ddda2 100644 --- a/typedapi/profiling/stacktraces/stacktraces.go +++ b/typedapi/profiling/stacktraces/stacktraces.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Extracts raw stacktrace information from Universal Profiling. package stacktraces diff --git a/typedapi/profiling/status/status.go b/typedapi/profiling/status/status.go index 9e95acd334..c0dae2f099 100644 --- a/typedapi/profiling/status/status.go +++ b/typedapi/profiling/status/status.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Returns basic information about the status of Universal Profiling. package status diff --git a/typedapi/profiling/topnfunctions/topn_functions.go b/typedapi/profiling/topnfunctions/topn_functions.go index c4a564d3d9..e02c07cdf5 100644 --- a/typedapi/profiling/topnfunctions/topn_functions.go +++ b/typedapi/profiling/topnfunctions/topn_functions.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Extracts a list of topN functions from Universal Profiling. package topnfunctions diff --git a/typedapi/queryrules/deleterule/delete_rule.go b/typedapi/queryrules/deleterule/delete_rule.go index 3be18fbc89..db48619f46 100644 --- a/typedapi/queryrules/deleterule/delete_rule.go +++ b/typedapi/queryrules/deleterule/delete_rule.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes a query rule within a query ruleset. +// Delete a query rule. +// Delete a query rule within a query ruleset. +// This is a destructive action that is only recoverable by re-adding the same +// rule with the create or update query rule API. package deleterule import ( @@ -81,7 +84,10 @@ func NewDeleteRuleFunc(tp elastictransport.Interface) NewDeleteRule { } } -// Deletes a query rule within a query ruleset. +// Delete a query rule. +// Delete a query rule within a query ruleset. +// This is a destructive action that is only recoverable by re-adding the same +// rule with the create or update query rule API. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-query-rule.html func New(tp elastictransport.Interface) *DeleteRule { diff --git a/typedapi/queryrules/deleterule/response.go b/typedapi/queryrules/deleterule/response.go index 9e8c329e71..8bb964f7ba 100644 --- a/typedapi/queryrules/deleterule/response.go +++ b/typedapi/queryrules/deleterule/response.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleterule // Response holds the response body struct for the package deleterule // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/delete_rule/QueryRuleDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/delete_rule/QueryRuleDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/queryrules/deleteruleset/delete_ruleset.go b/typedapi/queryrules/deleteruleset/delete_ruleset.go index 6edaa0b77c..57650c469f 100644 --- a/typedapi/queryrules/deleteruleset/delete_ruleset.go +++ b/typedapi/queryrules/deleteruleset/delete_ruleset.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes a query ruleset. +// Delete a query ruleset. +// Remove a query ruleset and its associated data. +// This is a destructive action that is not recoverable. package deleteruleset import ( @@ -76,7 +78,9 @@ func NewDeleteRulesetFunc(tp elastictransport.Interface) NewDeleteRuleset { } } -// Deletes a query ruleset. +// Delete a query ruleset. +// Remove a query ruleset and its associated data. +// This is a destructive action that is not recoverable. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-query-ruleset.html func New(tp elastictransport.Interface) *DeleteRuleset { diff --git a/typedapi/queryrules/deleteruleset/response.go b/typedapi/queryrules/deleteruleset/response.go index b4f940ffcf..2fafcdcf08 100644 --- a/typedapi/queryrules/deleteruleset/response.go +++ b/typedapi/queryrules/deleteruleset/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleteruleset // Response holds the response body struct for the package deleteruleset // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/delete_ruleset/QueryRulesetDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/delete_ruleset/QueryRulesetDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/queryrules/getrule/get_rule.go b/typedapi/queryrules/getrule/get_rule.go index 414671ec2e..4b544b6aae 100644 --- a/typedapi/queryrules/getrule/get_rule.go +++ b/typedapi/queryrules/getrule/get_rule.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the details about a query rule within a query ruleset +// Get a query rule. +// Get details about a query rule within a query ruleset. 
package getrule import ( @@ -81,7 +82,8 @@ func NewGetRuleFunc(tp elastictransport.Interface) NewGetRule { } } -// Returns the details about a query rule within a query ruleset +// Get a query rule. +// Get details about a query rule within a query ruleset. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-query-rule.html func New(tp elastictransport.Interface) *GetRule { diff --git a/typedapi/queryrules/getrule/response.go b/typedapi/queryrules/getrule/response.go index ab4863ebfd..1c1cd63052 100644 --- a/typedapi/queryrules/getrule/response.go +++ b/typedapi/queryrules/getrule/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getrule @@ -27,13 +27,24 @@ import ( // Response holds the response body struct for the package getrule // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/get_rule/QueryRuleGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/get_rule/QueryRuleGetResponse.ts#L22-L24 type Response struct { - Actions types.QueryRuleActions `json:"actions"` - Criteria []types.QueryRuleCriteria `json:"criteria"` - Priority *int `json:"priority,omitempty"` - RuleId string `json:"rule_id"` - Type queryruletype.QueryRuleType `json:"type"` + + // Actions The actions to take when the rule is matched. + // The format of this action depends on the rule type. + Actions types.QueryRuleActions `json:"actions"` + // Criteria The criteria that must be met for the rule to be applied. 
+ // If multiple criteria are specified for a rule, all criteria must be met for + // the rule to be applied. + Criteria []types.QueryRuleCriteria `json:"criteria"` + Priority *int `json:"priority,omitempty"` + // RuleId A unique identifier for the rule. + RuleId string `json:"rule_id"` + // Type The type of rule. + // `pinned` will identify and pin specific documents to the top of search + // results. + // `exclude` will exclude specific documents from search results. + Type queryruletype.QueryRuleType `json:"type"` } // NewResponse returns a Response diff --git a/typedapi/queryrules/getruleset/get_ruleset.go b/typedapi/queryrules/getruleset/get_ruleset.go index 13b3067d9c..ddb251b40d 100644 --- a/typedapi/queryrules/getruleset/get_ruleset.go +++ b/typedapi/queryrules/getruleset/get_ruleset.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the details about a query ruleset +// Get a query ruleset. +// Get details about a query ruleset. package getruleset import ( @@ -76,7 +77,8 @@ func NewGetRulesetFunc(tp elastictransport.Interface) NewGetRuleset { } } -// Returns the details about a query ruleset +// Get a query ruleset. +// Get details about a query ruleset. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-query-ruleset.html func New(tp elastictransport.Interface) *GetRuleset { diff --git a/typedapi/queryrules/getruleset/response.go b/typedapi/queryrules/getruleset/response.go index 20335b9ac5..7eb3c8fa5e 100644 --- a/typedapi/queryrules/getruleset/response.go +++ b/typedapi/queryrules/getruleset/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getruleset @@ -26,12 +26,12 @@ import ( // Response holds the response body struct for the package getruleset // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/get_ruleset/QueryRulesetGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/get_ruleset/QueryRulesetGetResponse.ts#L22-L24 type Response struct { - // Rules Rules associated with the query ruleset + // Rules Rules associated with the query ruleset. Rules []types.QueryRule `json:"rules"` - // RulesetId Query Ruleset unique identifier + // RulesetId A unique identifier for the ruleset. RulesetId string `json:"ruleset_id"` } diff --git a/typedapi/queryrules/listrulesets/list_rulesets.go b/typedapi/queryrules/listrulesets/list_rulesets.go index 6695df3355..1f56370314 100644 --- a/typedapi/queryrules/listrulesets/list_rulesets.go +++ b/typedapi/queryrules/listrulesets/list_rulesets.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns summarized information about existing query rulesets. +// Get all query rulesets. +// Get summarized information about the query rulesets. package listrulesets import ( @@ -68,7 +69,8 @@ func NewListRulesetsFunc(tp elastictransport.Interface) NewListRulesets { } } -// Returns summarized information about existing query rulesets. +// Get all query rulesets. +// Get summarized information about the query rulesets. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-query-rulesets.html func New(tp elastictransport.Interface) *ListRulesets { @@ -274,7 +276,7 @@ func (r *ListRulesets) Header(key, value string) *ListRulesets { return r } -// From Starting offset (default: 0) +// From The offset from the first result to fetch. // API name: from func (r *ListRulesets) From(from int) *ListRulesets { r.values.Set("from", strconv.Itoa(from)) @@ -282,7 +284,7 @@ func (r *ListRulesets) From(from int) *ListRulesets { return r } -// Size specifies a max number of results to get +// Size The maximum number of results to retrieve. // API name: size func (r *ListRulesets) Size(size int) *ListRulesets { r.values.Set("size", strconv.Itoa(size)) diff --git a/typedapi/queryrules/listrulesets/response.go b/typedapi/queryrules/listrulesets/response.go index 454a5a4b16..83b545c8e4 100644 --- a/typedapi/queryrules/listrulesets/response.go +++ b/typedapi/queryrules/listrulesets/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package listrulesets @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package listrulesets // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/list_rulesets/QueryRulesetListResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/list_rulesets/QueryRulesetListResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` Results []types.QueryRulesetListItem `json:"results"` diff --git a/typedapi/queryrules/putrule/put_rule.go b/typedapi/queryrules/putrule/put_rule.go index 93dcb66d1e..8c232b4c11 100644 --- a/typedapi/queryrules/putrule/put_rule.go +++ b/typedapi/queryrules/putrule/put_rule.go @@ -16,9 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates or updates a query rule within a query ruleset. +// Create or update a query rule. +// Create or update a query rule within a query ruleset. +// +// IMPORTANT: Due to limitations within pinned queries, you can only pin +// documents using ids or docs, but cannot use both in single rule. +// It is advised to use one or the other in query rulesets, to avoid errors. +// Additionally, pinned queries have a maximum limit of 100 pinned hits. +// If multiple matching rules pin more than 100 documents, only the first 100 +// documents are pinned in the order they are specified in the ruleset. 
package putrule import ( @@ -87,7 +95,15 @@ func NewPutRuleFunc(tp elastictransport.Interface) NewPutRule { } } -// Creates or updates a query rule within a query ruleset. +// Create or update a query rule. +// Create or update a query rule within a query ruleset. +// +// IMPORTANT: Due to limitations within pinned queries, you can only pin +// documents using ids or docs, but cannot use both in single rule. +// It is advised to use one or the other in query rulesets, to avoid errors. +// Additionally, pinned queries have a maximum limit of 100 pinned hits. +// If multiple matching rules pin more than 100 documents, only the first 100 +// documents are pinned in the order they are specified in the ruleset. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-query-rule.html func New(tp elastictransport.Interface) *PutRule { @@ -97,8 +113,6 @@ func New(tp elastictransport.Interface) *PutRule { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -319,7 +333,7 @@ func (r *PutRule) Header(key, value string) *PutRule { } // RulesetId The unique identifier of the query ruleset containing the rule to be created -// or updated +// or updated. // API Name: rulesetid func (r *PutRule) _rulesetid(rulesetid string) *PutRule { r.paramSet |= rulesetidMask @@ -329,7 +343,7 @@ func (r *PutRule) _rulesetid(rulesetid string) *PutRule { } // RuleId The unique identifier of the query rule within the specified ruleset to be -// created or updated +// created or updated. // API Name: ruleid func (r *PutRule) _ruleid(ruleid string) *PutRule { r.paramSet |= ruleidMask @@ -382,31 +396,56 @@ func (r *PutRule) Pretty(pretty bool) *PutRule { return r } +// The actions to take when the rule is matched. +// The format of this action depends on the rule type. 
// API name: actions -func (r *PutRule) Actions(actions *types.QueryRuleActions) *PutRule { +func (r *PutRule) Actions(actions types.QueryRuleActionsVariant) *PutRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Actions = *actions + r.req.Actions = *actions.QueryRuleActionsCaster() return r } +// The criteria that must be met for the rule to be applied. +// If multiple criteria are specified for a rule, all criteria must be met for +// the rule to be applied. // API name: criteria -func (r *PutRule) Criteria(criteria ...types.QueryRuleCriteria) *PutRule { - r.req.Criteria = criteria +func (r *PutRule) Criteria(criteria ...types.QueryRuleCriteriaVariant) *PutRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Criteria = make([]types.QueryRuleCriteria, len(criteria)) + for i, v := range criteria { + r.req.Criteria[i] = *v.QueryRuleCriteriaCaster() + } return r } // API name: priority func (r *PutRule) Priority(priority int) *PutRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Priority = &priority return r } +// The type of rule. // API name: type func (r *PutRule) Type(type_ queryruletype.QueryRuleType) *PutRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Type = type_ - return r } diff --git a/typedapi/queryrules/putrule/request.go b/typedapi/queryrules/putrule/request.go index e96f9627b3..7c7c7ac341 100644 --- a/typedapi/queryrules/putrule/request.go +++ b/typedapi/queryrules/putrule/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putrule @@ -34,12 +34,19 @@ import ( // Request holds the request body struct for the package putrule // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/put_rule/QueryRulePutRequest.ts#L28-L56 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/put_rule/QueryRulePutRequest.ts#L28-L79 type Request struct { - Actions types.QueryRuleActions `json:"actions"` - Criteria []types.QueryRuleCriteria `json:"criteria"` - Priority *int `json:"priority,omitempty"` - Type queryruletype.QueryRuleType `json:"type"` + + // Actions The actions to take when the rule is matched. + // The format of this action depends on the rule type. + Actions types.QueryRuleActions `json:"actions"` + // Criteria The criteria that must be met for the rule to be applied. + // If multiple criteria are specified for a rule, all criteria must be met for + // the rule to be applied. + Criteria []types.QueryRuleCriteria `json:"criteria"` + Priority *int `json:"priority,omitempty"` + // Type The type of rule. + Type queryruletype.QueryRuleType `json:"type"` } // NewRequest returns a Request diff --git a/typedapi/queryrules/putrule/response.go b/typedapi/queryrules/putrule/response.go index f957f9e31e..000779095e 100644 --- a/typedapi/queryrules/putrule/response.go +++ b/typedapi/queryrules/putrule/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putrule @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putrule // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/put_rule/QueryRulePutResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/put_rule/QueryRulePutResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/queryrules/putruleset/put_ruleset.go b/typedapi/queryrules/putruleset/put_ruleset.go index 818df94d17..a891ef4797 100644 --- a/typedapi/queryrules/putruleset/put_ruleset.go +++ b/typedapi/queryrules/putruleset/put_ruleset.go @@ -16,9 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates or updates a query ruleset. +// Create or update a query ruleset. +// There is a limit of 100 rules per ruleset. +// This limit can be increased by using the +// `xpack.applications.rules.max_rules_per_ruleset` cluster setting. +// +// IMPORTANT: Due to limitations within pinned queries, you can only select +// documents using `ids` or `docs`, but cannot use both in single rule. +// It is advised to use one or the other in query rulesets, to avoid errors. +// Additionally, pinned queries have a maximum limit of 100 pinned hits. +// If multiple matching rules pin more than 100 documents, only the first 100 +// documents are pinned in the order they are specified in the ruleset. 
package putruleset import ( @@ -81,7 +91,17 @@ func NewPutRulesetFunc(tp elastictransport.Interface) NewPutRuleset { } } -// Creates or updates a query ruleset. +// Create or update a query ruleset. +// There is a limit of 100 rules per ruleset. +// This limit can be increased by using the +// `xpack.applications.rules.max_rules_per_ruleset` cluster setting. +// +// IMPORTANT: Due to limitations within pinned queries, you can only select +// documents using `ids` or `docs`, but cannot use both in single rule. +// It is advised to use one or the other in query rulesets, to avoid errors. +// Additionally, pinned queries have a maximum limit of 100 pinned hits. +// If multiple matching rules pin more than 100 documents, only the first 100 +// documents are pinned in the order they are specified in the ruleset. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-query-ruleset.html func New(tp elastictransport.Interface) *PutRuleset { @@ -91,8 +111,6 @@ func New(tp elastictransport.Interface) *PutRuleset { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -304,7 +322,7 @@ func (r *PutRuleset) Header(key, value string) *PutRuleset { return r } -// RulesetId The unique identifier of the query ruleset to be created or updated +// RulesetId The unique identifier of the query ruleset to be created or updated. 
// API Name: rulesetid func (r *PutRuleset) _rulesetid(rulesetid string) *PutRuleset { r.paramSet |= rulesetidMask @@ -358,8 +376,15 @@ func (r *PutRuleset) Pretty(pretty bool) *PutRuleset { } // API name: rules -func (r *PutRuleset) Rules(rules ...types.QueryRule) *PutRuleset { - r.req.Rules = rules +func (r *PutRuleset) Rules(rules ...types.QueryRuleVariant) *PutRuleset { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Rules = make([]types.QueryRule, len(rules)) + for i, v := range rules { + r.req.Rules[i] = *v.QueryRuleCaster() + } return r } diff --git a/typedapi/queryrules/putruleset/request.go b/typedapi/queryrules/putruleset/request.go index 9ec8bc9ae7..b7c2ce74e2 100644 --- a/typedapi/queryrules/putruleset/request.go +++ b/typedapi/queryrules/putruleset/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putruleset @@ -32,7 +32,7 @@ import ( // Request holds the request body struct for the package putruleset // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/put_ruleset/QueryRulesetPutRequest.ts#L23-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/put_ruleset/QueryRulesetPutRequest.ts#L23-L59 type Request struct { Rules []types.QueryRule `json:"rules"` } diff --git a/typedapi/queryrules/putruleset/response.go b/typedapi/queryrules/putruleset/response.go index 4e1de2f35a..478817e9b5 100644 --- a/typedapi/queryrules/putruleset/response.go +++ b/typedapi/queryrules/putruleset/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putruleset @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putruleset // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/put_ruleset/QueryRulesetPutResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/put_ruleset/QueryRulesetPutResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/queryrules/test/request.go b/typedapi/queryrules/test/request.go new file mode 100644 index 0000000000..cb843e36d6 --- /dev/null +++ b/typedapi/queryrules/test/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package test + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package test +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/test/QueryRulesetTestRequest.ts#L24-L57 +type Request struct { + + // MatchCriteria The match criteria to apply to rules in the given query ruleset. + // Match criteria should match the keys defined in the `criteria.metadata` field + // of the rule. + MatchCriteria map[string]json.RawMessage `json:"match_criteria"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + MatchCriteria: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Test request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/queryrules/test/response.go b/typedapi/queryrules/test/response.go new file mode 100644 index 0000000000..6f8f860e79 --- /dev/null +++ b/typedapi/queryrules/test/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package test + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package test +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/test/QueryRulesetTestResponse.ts#L23-L28 +type Response struct { + MatchedRules []types.QueryRulesetMatchedRule `json:"matched_rules"` + TotalMatchedRules int `json:"total_matched_rules"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/queryrules/test/test.go b/typedapi/queryrules/test/test.go new file mode 100644 index 0000000000..d4ba7d09e9 --- /dev/null +++ b/typedapi/queryrules/test/test.go @@ -0,0 +1,394 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Test a query ruleset. +// Evaluate match criteria against a query ruleset to identify the rules that +// would match that criteria. +package test + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + rulesetidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Test struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + rulesetid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewTest type alias for index. +type NewTest func(rulesetid string) *Test + +// NewTestFunc returns a new instance of Test with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewTestFunc(tp elastictransport.Interface) NewTest { + return func(rulesetid string) *Test { + n := New(tp) + + n._rulesetid(rulesetid) + + return n + } +} + +// Test a query ruleset. +// Evaluate match criteria against a query ruleset to identify the rules that +// would match that criteria. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/test-query-ruleset.html +func New(tp elastictransport.Interface) *Test { + r := &Test{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Test) Raw(raw io.Reader) *Test { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Test) Request(req *Request) *Test { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Test) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Test: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == rulesetidMask: + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "rulesetid", r.rulesetid) + } + path.WriteString(r.rulesetid) + path.WriteString("/") + path.WriteString("_test") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Test) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "query_rules.test") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.test") + if reader := instrument.RecordRequestBody(ctx, "query_rules.test", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.test") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Test query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a test.Response +func (r Test) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.test") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = 
json.NewDecoder(res.Body).Decode(response) +		if err != nil { +			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { +				instrument.RecordError(ctx, err) +			} +			return nil, err +		} + +		return response, nil + +	} + +	errorResponse := types.NewElasticsearchError() +	err = json.NewDecoder(res.Body).Decode(errorResponse) +	if err != nil { +		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { +			instrument.RecordError(ctx, err) +		} +		return nil, err +	} + +	if errorResponse.Status == 0 { +		errorResponse.Status = res.StatusCode +	} + +	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { +		instrument.RecordError(ctx, errorResponse) +	} +	return nil, errorResponse +} + +// Header set a key, value pair in the Test headers map. +func (r *Test) Header(key, value string) *Test { +	r.headers.Set(key, value) + +	return r +} + +// RulesetId The unique identifier of the query ruleset to be created or updated +// API Name: rulesetid +func (r *Test) _rulesetid(rulesetid string) *Test { +	r.paramSet |= rulesetidMask +	r.rulesetid = rulesetid + +	return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Test) ErrorTrace(errortrace bool) *Test { +	r.values.Set("error_trace", strconv.FormatBool(errortrace)) + +	return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Test) FilterPath(filterpaths ...string) *Test { +	tmp := []string{} +	for _, item := range filterpaths { +		tmp = append(tmp, fmt.Sprintf("%v", item)) +	} +	r.values.Set("filter_path", strings.Join(tmp, ",")) + +	return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. 
When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Test) Human(human bool) *Test { +	r.values.Set("human", strconv.FormatBool(human)) + +	return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use +// this option for debugging only. +// API name: pretty +func (r *Test) Pretty(pretty bool) *Test { +	r.values.Set("pretty", strconv.FormatBool(pretty)) + +	return r +} + +// The match criteria to apply to rules in the given query ruleset. +// Match criteria should match the keys defined in the `criteria.metadata` field +// of the rule. +// API name: match_criteria +func (r *Test) MatchCriteria(matchcriteria map[string]json.RawMessage) *Test { +	// Initialize the request if it is not already initialized +	if r.req == nil { +		r.req = NewRequest() +	} +	r.req.MatchCriteria = matchcriteria +	return r +} + +func (r *Test) AddMatchCriterion(key string, value json.RawMessage) *Test { +	// Initialize the request if it is not already initialized +	if r.req == nil { +		r.req = NewRequest() +	} + +	var tmp map[string]json.RawMessage +	if r.req.MatchCriteria == nil { +		tmp = make(map[string]json.RawMessage) +	} else { +		tmp = r.req.MatchCriteria +	} + +	tmp[key] = value + +	r.req.MatchCriteria = tmp +	return r +} diff --git a/typedapi/rollup/deletejob/delete_job.go b/typedapi/rollup/deletejob/delete_job.go index bb299370bc..716e85d0a3 100644 --- a/typedapi/rollup/deletejob/delete_job.go +++ b/typedapi/rollup/deletejob/delete_job.go @@ -16,9 +16,39 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes an existing rollup job. +// Delete a rollup job. 
+// +// A job must be stopped before it can be deleted. +// If you attempt to delete a started job, an error occurs. +// Similarly, if you attempt to delete a nonexistent job, an exception occurs. +// +// IMPORTANT: When you delete a job, you remove only the process that is +// actively monitoring and rolling up data. +// The API does not delete any previously rolled up data. +// This is by design; a user may wish to roll up a static data set. +// Because the data set is static, after it has been fully rolled up there is no +// need to keep the indexing rollup job around (as there will be no new data). +// Thus the job can be deleted, leaving behind the rolled up data for analysis. +// If you wish to also remove the rollup data and the rollup index contains the +// data for only a single job, you can delete the whole rollup index. +// If the rollup index stores data from several jobs, you must issue a +// delete-by-query that targets the rollup job's identifier in the rollup index. +// For example: +// +// ``` +// POST my_rollup_index/_delete_by_query +// +// { +// "query": { +// "term": { +// "_rollup.id": "the_rollup_job_id" +// } +// } +// } +// +// ``` package deletejob import ( @@ -76,7 +106,37 @@ func NewDeleteJobFunc(tp elastictransport.Interface) NewDeleteJob { } } -// Deletes an existing rollup job. +// Delete a rollup job. +// +// A job must be stopped before it can be deleted. +// If you attempt to delete a started job, an error occurs. +// Similarly, if you attempt to delete a nonexistent job, an exception occurs. +// +// IMPORTANT: When you delete a job, you remove only the process that is +// actively monitoring and rolling up data. +// The API does not delete any previously rolled up data. +// This is by design; a user may wish to roll up a static data set. +// Because the data set is static, after it has been fully rolled up there is no +// need to keep the indexing rollup job around (as there will be no new data). 
+// Thus the job can be deleted, leaving behind the rolled up data for analysis. +// If you wish to also remove the rollup data and the rollup index contains the +// data for only a single job, you can delete the whole rollup index. +// If the rollup index stores data from several jobs, you must issue a +// delete-by-query that targets the rollup job's identifier in the rollup index. +// For example: +// +// ``` +// POST my_rollup_index/_delete_by_query +// +// { +// "query": { +// "term": { +// "_rollup.id": "the_rollup_job_id" +// } +// } +// } +// +// ``` // // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-delete-job.html func New(tp elastictransport.Interface) *DeleteJob { diff --git a/typedapi/rollup/deletejob/response.go b/typedapi/rollup/deletejob/response.go index 1e139b86ff..209fed26cb 100644 --- a/typedapi/rollup/deletejob/response.go +++ b/typedapi/rollup/deletejob/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletejob @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package deletejob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/delete_job/DeleteRollupJobResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/delete_job/DeleteRollupJobResponse.ts#L22-L27 type Response struct { Acknowledged bool `json:"acknowledged"` TaskFailures []types.TaskFailure `json:"task_failures,omitempty"` diff --git a/typedapi/rollup/getjobs/get_jobs.go b/typedapi/rollup/getjobs/get_jobs.go index 2347dbcd0f..aff5d4b99b 100644 --- a/typedapi/rollup/getjobs/get_jobs.go +++ b/typedapi/rollup/getjobs/get_jobs.go @@ -16,9 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves the configuration, stats, and status of rollup jobs. +// Get rollup job information. +// Get the configuration, stats, and status of rollup jobs. +// +// NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. +// If a job was created, ran for a while, then was deleted, the API does not +// return any details about it. +// For details about a historical rollup job, the rollup capabilities API may be +// more useful. package getjobs import ( @@ -74,7 +81,14 @@ func NewGetJobsFunc(tp elastictransport.Interface) NewGetJobs { } } -// Retrieves the configuration, stats, and status of rollup jobs. +// Get rollup job information. 
+// Get the configuration, stats, and status of rollup jobs. +// +// NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. +// If a job was created, ran for a while, then was deleted, the API does not +// return any details about it. +// For details about a historical rollup job, the rollup capabilities API may be +// more useful. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-job.html func New(tp elastictransport.Interface) *GetJobs { diff --git a/typedapi/rollup/getjobs/response.go b/typedapi/rollup/getjobs/response.go index 4ab93f7189..c5048927e3 100644 --- a/typedapi/rollup/getjobs/response.go +++ b/typedapi/rollup/getjobs/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getjobs @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getjobs // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_jobs/GetRollupJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_jobs/GetRollupJobResponse.ts#L22-L24 type Response struct { Jobs []types.RollupJob `json:"jobs"` } diff --git a/typedapi/rollup/getrollupcaps/get_rollup_caps.go b/typedapi/rollup/getrollupcaps/get_rollup_caps.go index 99da4f9f04..c93a6dc4ae 100644 --- a/typedapi/rollup/getrollupcaps/get_rollup_caps.go +++ b/typedapi/rollup/getrollupcaps/get_rollup_caps.go @@ -16,10 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the capabilities of any rollup jobs that have been configured for a +// Get the rollup job capabilities. +// Get the capabilities of any rollup jobs that have been configured for a // specific index or index pattern. +// +// This API is useful because a rollup job is often configured to rollup only a +// subset of fields from the source index. +// Furthermore, only certain aggregations can be configured for various fields, +// leading to a limited subset of functionality depending on that configuration. +// This API enables you to inspect an index and determine: +// +// 1. Does this index have associated rollup data somewhere in the cluster? +// 2. If yes to the first question, what fields were rolled up, what +// aggregations can be performed, and where does the data live? package getrollupcaps import ( @@ -75,9 +86,20 @@ func NewGetRollupCapsFunc(tp elastictransport.Interface) NewGetRollupCaps { } } -// Returns the capabilities of any rollup jobs that have been configured for a +// Get the rollup job capabilities. +// Get the capabilities of any rollup jobs that have been configured for a // specific index or index pattern. // +// This API is useful because a rollup job is often configured to rollup only a +// subset of fields from the source index. +// Furthermore, only certain aggregations can be configured for various fields, +// leading to a limited subset of functionality depending on that configuration. +// This API enables you to inspect an index and determine: +// +// 1. Does this index have associated rollup data somewhere in the cluster? +// 2. If yes to the first question, what fields were rolled up, what +// aggregations can be performed, and where does the data live? 
+// // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-caps.html func New(tp elastictransport.Interface) *GetRollupCaps { r := &GetRollupCaps{ diff --git a/typedapi/rollup/getrollupcaps/response.go b/typedapi/rollup/getrollupcaps/response.go index 7736bd1874..daaf4afea4 100644 --- a/typedapi/rollup/getrollupcaps/response.go +++ b/typedapi/rollup/getrollupcaps/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getrollupcaps @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrollupcaps // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_rollup_caps/GetRollupCapabilitiesResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_rollup_caps/GetRollupCapabilitiesResponse.ts#L24-L27 type Response map[string]types.RollupCapabilities diff --git a/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go b/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go index 711cc0ef04..33584d7197 100644 --- a/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go +++ b/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go @@ -16,10 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the rollup capabilities of all jobs inside of a rollup index (for -// example, the index where rollup data is stored). +// Get the rollup index capabilities. +// Get the rollup capabilities of all jobs inside of a rollup index. +// A single rollup index may store the data for multiple rollup jobs and may +// have a variety of capabilities depending on those jobs. This API enables you +// to determine: +// +// * What jobs are stored in an index (or indices specified via a pattern)? +// * What target indices were rolled up, what fields were used in those rollups, +// and what aggregations can be performed on each job? package getrollupindexcaps import ( @@ -77,8 +84,15 @@ func NewGetRollupIndexCapsFunc(tp elastictransport.Interface) NewGetRollupIndexC } } -// Returns the rollup capabilities of all jobs inside of a rollup index (for -// example, the index where rollup data is stored). +// Get the rollup index capabilities. +// Get the rollup capabilities of all jobs inside of a rollup index. +// A single rollup index may store the data for multiple rollup jobs and may +// have a variety of capabilities depending on those jobs. This API enables you +// to determine: +// +// * What jobs are stored in an index (or indices specified via a pattern)? +// * What target indices were rolled up, what fields were used in those rollups, +// and what aggregations can be performed on each job? 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-index-caps.html func New(tp elastictransport.Interface) *GetRollupIndexCaps { diff --git a/typedapi/rollup/getrollupindexcaps/response.go b/typedapi/rollup/getrollupindexcaps/response.go index a8b693da89..8f56168112 100644 --- a/typedapi/rollup/getrollupindexcaps/response.go +++ b/typedapi/rollup/getrollupindexcaps/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getrollupindexcaps @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrollupindexcaps // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_rollup_index_caps/GetRollupIndexCapabilitiesResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_rollup_index_caps/GetRollupIndexCapabilitiesResponse.ts#L24-L27 type Response map[string]types.IndexCapabilities diff --git a/typedapi/rollup/putjob/put_job.go b/typedapi/rollup/putjob/put_job.go index 45edc97180..6e53bbc914 100644 --- a/typedapi/rollup/putjob/put_job.go +++ b/typedapi/rollup/putjob/put_job.go @@ -16,9 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a rollup job. +// Create a rollup job. 
+// +// WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will +// fail with a message about the deprecation and planned removal of rollup +// features. A cluster needs to contain either a rollup job or a rollup index in +// order for this API to be allowed to run. +// +// The rollup job configuration contains all the details about how the job +// should run, when it indexes documents, and what future queries will be able +// to run against the rollup index. +// +// There are three main sections to the job configuration: the logistical +// details about the job (for example, the cron schedule), the fields that are +// used for grouping, and what metrics to collect for each group. +// +// Jobs are created in a `STOPPED` state. You can start them with the start +// rollup jobs API. package putjob import ( @@ -81,7 +97,23 @@ func NewPutJobFunc(tp elastictransport.Interface) NewPutJob { } } -// Creates a rollup job. +// Create a rollup job. +// +// WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will +// fail with a message about the deprecation and planned removal of rollup +// features. A cluster needs to contain either a rollup job or a rollup index in +// order for this API to be allowed to run. +// +// The rollup job configuration contains all the details about how the job +// should run, when it indexes documents, and what future queries will be able +// to run against the rollup index. +// +// There are three main sections to the job configuration: the logistical +// details about the job (for example, the cron schedule), the fields that are +// used for grouping, and what metrics to collect for each group. +// +// Jobs are created in a `STOPPED` state. You can start them with the start +// rollup jobs API. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-put-job.html func New(tp elastictransport.Interface) *PutJob { @@ -91,8 +123,6 @@ func New(tp elastictransport.Interface) *PutJob { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -367,7 +397,7 @@ func (r *PutJob) Pretty(pretty bool) *PutJob { return r } -// Cron A cron string which defines the intervals when the rollup job should be +// A cron string which defines the intervals when the rollup job should be // executed. When the interval // triggers, the indexer attempts to rollup the data in the index pattern. The // cron pattern is unrelated @@ -378,13 +408,17 @@ func (r *PutJob) Pretty(pretty bool) *PutJob { // cron pattern is defined just like a Watcher cron schedule. // API name: cron func (r *PutJob) Cron(cron string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Cron = cron return r } -// Groups Defines the grouping fields and aggregations that are defined for this rollup +// Defines the grouping fields and aggregations that are defined for this rollup // job. These fields will then be // available later for aggregating into buckets. These aggs and fields can be // used in any combination. Think of @@ -395,45 +429,65 @@ func (r *PutJob) Cron(cron string) *PutJob { // enough flexibility that you simply need to determine which fields are needed, // not in what order they are needed. 
// API name: groups -func (r *PutJob) Groups(groups *types.Groupings) *PutJob { +func (r *PutJob) Groups(groups types.GroupingsVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Groups = *groups + r.req.Groups = *groups.GroupingsCaster() return r } // API name: headers -func (r *PutJob) Headers(httpheaders types.HttpHeaders) *PutJob { - r.req.Headers = httpheaders +func (r *PutJob) Headers(httpheaders types.HttpHeadersVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Headers = *httpheaders.HttpHeadersCaster() return r } -// IndexPattern The index or index pattern to roll up. Supports wildcard-style patterns +// The index or index pattern to roll up. Supports wildcard-style patterns // (`logstash-*`). The job attempts to // rollup the entire index or index-pattern. // API name: index_pattern func (r *PutJob) IndexPattern(indexpattern string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.IndexPattern = indexpattern return r } -// Metrics Defines the metrics to collect for each grouping tuple. By default, only the +// Defines the metrics to collect for each grouping tuple. By default, only the // doc_counts are collected for each // group. To make rollup useful, you will often add metrics like averages, mins, // maxes, etc. Metrics are defined // on a per-field basis and for each field you configure which metric should be // collected. 
// API name: metrics -func (r *PutJob) Metrics(metrics ...types.FieldMetric) *PutJob { - r.req.Metrics = metrics +func (r *PutJob) Metrics(metrics ...types.FieldMetricVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range metrics { + r.req.Metrics = append(r.req.Metrics, *v.FieldMetricCaster()) + + } return r } -// PageSize The number of bucket results that are processed on each iteration of the +// The number of bucket results that are processed on each iteration of the // rollup indexer. A larger value tends // to execute faster, but requires more memory during processing. This value has // no effect on how the data is @@ -441,25 +495,40 @@ func (r *PutJob) Metrics(metrics ...types.FieldMetric) *PutJob { // indexer. // API name: page_size func (r *PutJob) PageSize(pagesize int) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.PageSize = pagesize return r } -// RollupIndex The index that contains the rollup results. The index can be shared with +// The index that contains the rollup results. The index can be shared with // other rollup jobs. The data is stored so that it doesn’t interfere with // unrelated jobs. // API name: rollup_index func (r *PutJob) RollupIndex(indexname string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RollupIndex = indexname return r } -// Timeout Time to wait for the request to complete. +// Time to wait for the request to complete. 
// API name: timeout -func (r *PutJob) Timeout(duration types.Duration) *PutJob { - r.req.Timeout = duration +func (r *PutJob) Timeout(duration types.DurationVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() return r } diff --git a/typedapi/rollup/putjob/request.go b/typedapi/rollup/putjob/request.go index ad8a7a86fa..80ff7b750a 100644 --- a/typedapi/rollup/putjob/request.go +++ b/typedapi/rollup/putjob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putjob @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putjob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/put_job/CreateRollupJobRequest.ts#L27-L89 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/put_job/CreateRollupJobRequest.ts#L27-L105 type Request struct { // Cron A cron string which defines the intervals when the rollup job should be diff --git a/typedapi/rollup/putjob/response.go b/typedapi/rollup/putjob/response.go index c3eff1abc3..c48d3a3775 100644 --- a/typedapi/rollup/putjob/response.go +++ b/typedapi/rollup/putjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putjob // Response holds the response body struct for the package putjob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/put_job/CreateRollupJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/put_job/CreateRollupJobResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/rollup/rollupsearch/request.go b/typedapi/rollup/rollupsearch/request.go index d72fab40d9..e8d3f3e3fb 100644 --- a/typedapi/rollup/rollupsearch/request.go +++ b/typedapi/rollup/rollupsearch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package rollupsearch @@ -33,12 +33,12 @@ import ( // Request holds the request body struct for the package rollupsearch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/rollup_search/RollupSearchRequest.ts#L27-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/rollup_search/RollupSearchRequest.ts#L27-L109 type Request struct { // Aggregations Specifies aggregations. Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` - // Query Specifies a DSL query. + // Query Specifies a DSL query that is subject to some limitations. 
Query *types.Query `json:"query,omitempty"` // Size Must be zero if set, as rollups work on pre-aggregated data. Size *int `json:"size,omitempty"` diff --git a/typedapi/rollup/rollupsearch/response.go b/typedapi/rollup/rollupsearch/response.go index e65dc2ff99..099250da80 100644 --- a/typedapi/rollup/rollupsearch/response.go +++ b/typedapi/rollup/rollupsearch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package rollupsearch @@ -34,7 +34,7 @@ import ( // Response holds the response body struct for the package rollupsearch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/rollup_search/RollupSearchResponse.ts#L27-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/rollup_search/RollupSearchResponse.ts#L27-L36 type Response struct { Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` Hits types.HitsMetadata `json:"hits"` @@ -494,6 +494,13 @@ func (s *Response) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := types.NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := types.NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { diff --git a/typedapi/rollup/rollupsearch/rollup_search.go b/typedapi/rollup/rollupsearch/rollup_search.go index e66957ae59..3bbe58bcaf 100644 --- a/typedapi/rollup/rollupsearch/rollup_search.go +++ b/typedapi/rollup/rollupsearch/rollup_search.go @@ -16,9 +16,54 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Enables searching rolled-up data using the standard Query DSL. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Search rolled-up data. +// The rollup search endpoint is needed because, internally, rolled-up documents +// utilize a different document structure than the original data. +// It rewrites standard Query DSL into a format that matches the rollup +// documents then takes the response and rewrites it back to what a client would +// expect given the original query. +// +// The request body supports a subset of features from the regular search API. +// The following functionality is not available: +// +// `size`: Because rollups work on pre-aggregated data, no search hits can be +// returned and so size must be set to zero or omitted entirely. +// `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are +// similarly disallowed. +// +// **Searching both historical rollup and non-rollup data** +// +// The rollup search API has the capability to search across both "live" +// non-rollup data and the aggregated rollup data. +// This is done by simply adding the live indices to the URI. For example: +// +// ``` +// GET sensor-1,sensor_rollup/_rollup_search +// +// { +// "size": 0, +// "aggregations": { +// "max_temperature": { +// "max": { +// "field": "temperature" +// } +// } +// } +// } +// +// ``` +// +// The rollup search endpoint does two things when the search runs: +// +// * The original request is sent to the non-rollup index unaltered. +// * A rewritten version of the original request is sent to the rollup index. +// +// When the two responses are received, the endpoint rewrites the rollup +// response and merges the two together. 
+// During the merging process, if there is any overlap in buckets between the +// two responses, the buckets from the non-rollup index are used. package rollupsearch import ( @@ -81,7 +126,52 @@ func NewRollupSearchFunc(tp elastictransport.Interface) NewRollupSearch { } } -// Enables searching rolled-up data using the standard Query DSL. +// Search rolled-up data. +// The rollup search endpoint is needed because, internally, rolled-up documents +// utilize a different document structure than the original data. +// It rewrites standard Query DSL into a format that matches the rollup +// documents then takes the response and rewrites it back to what a client would +// expect given the original query. +// +// The request body supports a subset of features from the regular search API. +// The following functionality is not available: +// +// `size`: Because rollups work on pre-aggregated data, no search hits can be +// returned and so size must be set to zero or omitted entirely. +// `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are +// similarly disallowed. +// +// **Searching both historical rollup and non-rollup data** +// +// The rollup search API has the capability to search across both "live" +// non-rollup data and the aggregated rollup data. +// This is done by simply adding the live indices to the URI. For example: +// +// ``` +// GET sensor-1,sensor_rollup/_rollup_search +// +// { +// "size": 0, +// "aggregations": { +// "max_temperature": { +// "max": { +// "field": "temperature" +// } +// } +// } +// } +// +// ``` +// +// The rollup search endpoint does two things when the search runs: +// +// * The original request is sent to the non-rollup index unaltered. +// * A rewritten version of the original request is sent to the rollup index. +// +// When the two responses are received, the endpoint rewrites the rollup +// response and merges the two together. 
+// During the merging process, if there is any overlap in buckets between the +// two responses, the buckets from the non-rollup index are used. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-search.html func New(tp elastictransport.Interface) *RollupSearch { @@ -91,8 +181,6 @@ func New(tp elastictransport.Interface) *RollupSearch { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -306,7 +394,19 @@ func (r *RollupSearch) Header(key, value string) *RollupSearch { return r } -// Index Enables searching rolled-up data using the standard Query DSL. +// Index A comma-separated list of data streams and indices used to limit the request. +// This parameter has the following rules: +// +// * At least one data stream, index, or wildcard expression must be specified. +// This target can include a rollup or non-rollup index. For data streams, the +// stream's backing indices can only serve as non-rollup indices. Omitting the +// parameter or using `_all` are not permitted. +// * Multiple non-rollup indices may be specified. +// * Only one rollup index may be specified. If more than one are supplied, an +// exception occurs. +// * Wildcard expressions (`*`) may be used. If they match more than one rollup +// index, an exception occurs. However, you can use an expression to match +// multiple non-rollup indices or data streams. // API Name: index func (r *RollupSearch) _index(index string) *RollupSearch { r.paramSet |= indexMask @@ -377,27 +477,57 @@ func (r *RollupSearch) Pretty(pretty bool) *RollupSearch { return r } -// Aggregations Specifies aggregations. +// Specifies aggregations. 
// API name: aggregations func (r *RollupSearch) Aggregations(aggregations map[string]types.Aggregations) *RollupSearch { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} +func (r *RollupSearch) AddAggregation(key string, value types.AggregationsVariant) *RollupSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + + r.req.Aggregations = tmp return r } -// Query Specifies a DSL query. +// Specifies a DSL query that is subject to some limitations. // API name: query -func (r *RollupSearch) Query(query *types.Query) *RollupSearch { +func (r *RollupSearch) Query(query types.QueryVariant) *RollupSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// Size Must be zero if set, as rollups work on pre-aggregated data. +// Must be zero if set, as rollups work on pre-aggregated data. // API name: size func (r *RollupSearch) Size(size int) *RollupSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r diff --git a/typedapi/rollup/startjob/response.go b/typedapi/rollup/startjob/response.go index 6216869cec..ce25d65d9c 100644 --- a/typedapi/rollup/startjob/response.go +++ b/typedapi/rollup/startjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package startjob // Response holds the response body struct for the package startjob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/start_job/StartRollupJobResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/start_job/StartRollupJobResponse.ts#L20-L22 type Response struct { Started bool `json:"started"` } diff --git a/typedapi/rollup/startjob/start_job.go b/typedapi/rollup/startjob/start_job.go index c3f241e71b..62f70a80b8 100644 --- a/typedapi/rollup/startjob/start_job.go +++ b/typedapi/rollup/startjob/start_job.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Starts an existing, stopped rollup job. +// Start rollup jobs. +// If you try to start a job that does not exist, an exception occurs. +// If you try to start a job that is already started, nothing happens. package startjob import ( @@ -76,7 +78,9 @@ func NewStartJobFunc(tp elastictransport.Interface) NewStartJob { } } -// Starts an existing, stopped rollup job. +// Start rollup jobs. +// If you try to start a job that does not exist, an exception occurs. +// If you try to start a job that is already started, nothing happens. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-start-job.html func New(tp elastictransport.Interface) *StartJob { diff --git a/typedapi/rollup/stopjob/response.go b/typedapi/rollup/stopjob/response.go index 246df9ce55..6e50147b08 100644 --- a/typedapi/rollup/stopjob/response.go +++ b/typedapi/rollup/stopjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stopjob // Response holds the response body struct for the package stopjob // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/stop_job/StopRollupJobResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/stop_job/StopRollupJobResponse.ts#L20-L22 type Response struct { Stopped bool `json:"stopped"` } diff --git a/typedapi/rollup/stopjob/stop_job.go b/typedapi/rollup/stopjob/stop_job.go index 2ab6c76b06..90ac38f5d9 100644 --- a/typedapi/rollup/stopjob/stop_job.go +++ b/typedapi/rollup/stopjob/stop_job.go @@ -16,9 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Stops an existing, started rollup job. +// Stop rollup jobs. +// If you try to stop a job that does not exist, an exception occurs. +// If you try to stop a job that is already stopped, nothing happens. 
+// +// Since only a stopped job can be deleted, it can be useful to block the API +// until the indexer has fully stopped. +// This is accomplished with the `wait_for_completion` query parameter, and +// optionally a timeout. For example: +// +// ``` +// POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s +// ``` +// The parameter blocks the API call from returning until either the job has +// moved to STOPPED or the specified time has elapsed. +// If the specified time elapses without the job moving to STOPPED, a timeout +// exception occurs. package stopjob import ( @@ -76,7 +91,22 @@ func NewStopJobFunc(tp elastictransport.Interface) NewStopJob { } } -// Stops an existing, started rollup job. +// Stop rollup jobs. +// If you try to stop a job that does not exist, an exception occurs. +// If you try to stop a job that is already stopped, nothing happens. +// +// Since only a stopped job can be deleted, it can be useful to block the API +// until the indexer has fully stopped. +// This is accomplished with the `wait_for_completion` query parameter, and +// optionally a timeout. For example: +// +// ``` +// POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s +// ``` +// The parameter blocks the API call from returning until either the job has +// moved to STOPPED or the specified time has elapsed. +// If the specified time elapses without the job moving to STOPPED, a timeout +// exception occurs. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-stop-job.html func New(tp elastictransport.Interface) *StopJob { @@ -304,6 +334,10 @@ func (r *StopJob) _id(id string) *StopJob { // Timeout If `wait_for_completion` is `true`, the API blocks for (at maximum) the // specified duration while waiting for the job to stop. // If more than `timeout` time has passed, the API throws a timeout exception. +// NOTE: Even if a timeout occurs, the stop request is still processing and +// eventually moves the job to STOPPED. 
+// The timeout simply means the API call itself timed out while waiting for the +// status change. // API name: timeout func (r *StopJob) Timeout(duration string) *StopJob { r.values.Set("timeout", duration) diff --git a/typedapi/searchablesnapshots/cachestats/cache_stats.go b/typedapi/searchablesnapshots/cachestats/cache_stats.go index be0b2cbaca..032987f3d3 100644 --- a/typedapi/searchablesnapshots/cachestats/cache_stats.go +++ b/typedapi/searchablesnapshots/cachestats/cache_stats.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieve node-level cache statistics about searchable snapshots. +// Get cache statistics. +// Get statistics about the shared cache for partially mounted indices. package cachestats import ( @@ -74,9 +75,10 @@ func NewCacheStatsFunc(tp elastictransport.Interface) NewCacheStats { } } -// Retrieve node-level cache statistics about searchable snapshots. +// Get cache statistics. +// Get statistics about the shared cache for partially mounted indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-cache-stats.html func New(tp elastictransport.Interface) *CacheStats { r := &CacheStats{ transport: tp, @@ -299,9 +301,7 @@ func (r *CacheStats) Header(key, value string) *CacheStats { return r } -// NodeId A comma-separated list of node IDs or names to limit the returned -// information; use `_local` to return information from the node you're -// connecting to, leave empty to get information from all nodes +// NodeId The names of the nodes in the cluster to target. 
// API Name: nodeid func (r *CacheStats) NodeId(nodeid string) *CacheStats { r.paramSet |= nodeidMask diff --git a/typedapi/searchablesnapshots/cachestats/response.go b/typedapi/searchablesnapshots/cachestats/response.go index e33279cb8a..b771daa6b8 100644 --- a/typedapi/searchablesnapshots/cachestats/response.go +++ b/typedapi/searchablesnapshots/cachestats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package cachestats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package cachestats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/searchable_snapshots/cache_stats/Response.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/searchable_snapshots/cache_stats/Response.ts#L24-L28 type Response struct { Nodes map[string]types.Node `json:"nodes"` } diff --git a/typedapi/searchablesnapshots/clearcache/clear_cache.go b/typedapi/searchablesnapshots/clearcache/clear_cache.go index be0103eb42..05a4de920f 100644 --- a/typedapi/searchablesnapshots/clearcache/clear_cache.go +++ b/typedapi/searchablesnapshots/clearcache/clear_cache.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Clear the cache of searchable snapshots. +// Clear the cache. +// Clear indices and data streams from the shared cache for partially mounted +// indices. 
package clearcache import ( @@ -75,9 +77,11 @@ func NewClearCacheFunc(tp elastictransport.Interface) NewClearCache { } } -// Clear the cache of searchable snapshots. +// Clear the cache. +// Clear indices and data streams from the shared cache for partially mounted +// indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-clear-cache.html func New(tp elastictransport.Interface) *ClearCache { r := &ClearCache{ transport: tp, @@ -300,7 +304,9 @@ func (r *ClearCache) Header(key, value string) *ClearCache { return r } -// Index A comma-separated list of index names +// Index A comma-separated list of data streams, indices, and aliases to clear from +// the cache. +// It supports wildcards (`*`). // API Name: index func (r *ClearCache) Index(index string) *ClearCache { r.paramSet |= indexMask @@ -340,20 +346,6 @@ func (r *ClearCache) IgnoreUnavailable(ignoreunavailable bool) *ClearCache { return r } -// API name: pretty -func (r *ClearCache) Pretty(pretty bool) *ClearCache { - r.values.Set("pretty", strconv.FormatBool(pretty)) - - return r -} - -// API name: human -func (r *ClearCache) Human(human bool) *ClearCache { - r.values.Set("human", strconv.FormatBool(human)) - - return r -} - // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -375,3 +367,25 @@ func (r *ClearCache) FilterPath(filterpaths ...string) *ClearCache { return r } + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines.
+// API name: human +func (r *ClearCache) Human(human bool) *ClearCache { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging. +// API name: pretty +func (r *ClearCache) Pretty(pretty bool) *ClearCache { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/searchablesnapshots/clearcache/response.go b/typedapi/searchablesnapshots/clearcache/response.go index c49275819e..554ebb020b 100644 --- a/typedapi/searchablesnapshots/clearcache/response.go +++ b/typedapi/searchablesnapshots/clearcache/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clearcache @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcache // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/searchable_snapshots/clear_cache/SearchableSnapshotsClearCacheResponse.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/searchable_snapshots/clear_cache/SearchableSnapshotsClearCacheResponse.ts#L22-L25 type Response = json.RawMessage diff --git a/typedapi/searchablesnapshots/mount/mount.go b/typedapi/searchablesnapshots/mount/mount.go index 237d3edbc1..7b2659e9a5 100644 --- a/typedapi/searchablesnapshots/mount/mount.go +++ b/typedapi/searchablesnapshots/mount/mount.go @@ -16,9 +16,13 @@ // under the License.
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Mount a snapshot as a searchable index. +// Mount a snapshot. +// Mount a snapshot as a searchable snapshot index. +// Do not use this API for snapshots managed by index lifecycle management +// (ILM). +// Manually mounting ILM-managed snapshots can interfere with ILM processes. package mount import ( @@ -86,7 +90,11 @@ func NewMountFunc(tp elastictransport.Interface) NewMount { } } -// Mount a snapshot as a searchable index. +// Mount a snapshot. +// Mount a snapshot as a searchable snapshot index. +// Do not use this API for snapshots managed by index lifecycle management +// (ILM). +// Manually mounting ILM-managed snapshots can interfere with ILM processes. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-mount-snapshot.html func New(tp elastictransport.Interface) *Mount { @@ -96,8 +104,6 @@ func New(tp elastictransport.Interface) *Mount { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -317,7 +323,7 @@ func (r *Mount) Header(key, value string) *Mount { return r } -// Repository The name of the repository containing the snapshot of the index to mount +// Repository The name of the repository containing the snapshot of the index to mount. // API Name: repository func (r *Mount) _repository(repository string) *Mount { r.paramSet |= repositoryMask @@ -326,7 +332,7 @@ func (r *Mount) _repository(repository string) *Mount { return r } -// Snapshot The name of the snapshot of the index to mount +// Snapshot The name of the snapshot of the index to mount. 
// API Name: snapshot func (r *Mount) _snapshot(snapshot string) *Mount { r.paramSet |= snapshotMask @@ -335,7 +341,10 @@ func (r *Mount) _snapshot(snapshot string) *Mount { return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *Mount) MasterTimeout(duration string) *Mount { r.values.Set("master_timeout", duration) @@ -343,7 +352,7 @@ func (r *Mount) MasterTimeout(duration string) *Mount { return r } -// WaitForCompletion Should this request wait until the operation has completed before returning +// WaitForCompletion If true, the request blocks until the operation is complete. // API name: wait_for_completion func (r *Mount) WaitForCompletion(waitforcompletion bool) *Mount { r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) @@ -351,8 +360,7 @@ func (r *Mount) WaitForCompletion(waitforcompletion bool) *Mount { return r } -// Storage Selects the kind of local storage used to accelerate searches. Experimental, -// and defaults to `full_copy` +// Storage The mount option for the searchable snapshot index. // API name: storage func (r *Mount) Storage(storage string) *Mount { r.values.Set("storage", storage) @@ -404,30 +412,75 @@ func (r *Mount) Pretty(pretty bool) *Mount { return r } +// The names of settings that should be removed from the index when it is +// mounted. 
// API name: ignore_index_settings func (r *Mount) IgnoreIndexSettings(ignoreindexsettings ...string) *Mount { - r.req.IgnoreIndexSettings = ignoreindexsettings + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ignoreindexsettings { + + r.req.IgnoreIndexSettings = append(r.req.IgnoreIndexSettings, v) + } return r } +// The name of the index contained in the snapshot whose data is to be mounted. +// If no `renamed_index` is specified, this name will also be used to create the +// new index. // API name: index func (r *Mount) Index(indexname string) *Mount { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Index = indexname return r } +// The settings that should be added to the index when it is mounted. // API name: index_settings func (r *Mount) IndexSettings(indexsettings map[string]json.RawMessage) *Mount { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.IndexSettings = indexsettings + return r +} + +func (r *Mount) AddIndexSetting(key string, value json.RawMessage) *Mount { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + var tmp map[string]json.RawMessage + if r.req.IndexSettings == nil { + r.req.IndexSettings = make(map[string]json.RawMessage) + } else { + tmp = r.req.IndexSettings + } + + tmp[key] = value + + r.req.IndexSettings = tmp return r } +// The name of the index that will be created. 
// API name: renamed_index func (r *Mount) RenamedIndex(indexname string) *Mount { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RenamedIndex = &indexname return r diff --git a/typedapi/searchablesnapshots/mount/request.go b/typedapi/searchablesnapshots/mount/request.go index 72c03dbd73..f4aee112fe 100644 --- a/typedapi/searchablesnapshots/mount/request.go +++ b/typedapi/searchablesnapshots/mount/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package mount @@ -30,12 +30,20 @@ import ( // Request holds the request body struct for the package mount // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/searchable_snapshots/mount/SearchableSnapshotsMountRequest.ts#L26-L49 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/searchable_snapshots/mount/SearchableSnapshotsMountRequest.ts#L26-L92 type Request struct { - IgnoreIndexSettings []string `json:"ignore_index_settings,omitempty"` - Index string `json:"index"` - IndexSettings map[string]json.RawMessage `json:"index_settings,omitempty"` - RenamedIndex *string `json:"renamed_index,omitempty"` + + // IgnoreIndexSettings The names of settings that should be removed from the index when it is + // mounted. + IgnoreIndexSettings []string `json:"ignore_index_settings,omitempty"` + // Index The name of the index contained in the snapshot whose data is to be mounted. + // If no `renamed_index` is specified, this name will also be used to create the + // new index. 
+ Index string `json:"index"` + // IndexSettings The settings that should be added to the index when it is mounted. + IndexSettings map[string]json.RawMessage `json:"index_settings,omitempty"` + // RenamedIndex The name of the index that will be created. + RenamedIndex *string `json:"renamed_index,omitempty"` } // NewRequest returns a Request diff --git a/typedapi/searchablesnapshots/mount/response.go b/typedapi/searchablesnapshots/mount/response.go index d16af094c8..d0dffe9209 100644 --- a/typedapi/searchablesnapshots/mount/response.go +++ b/typedapi/searchablesnapshots/mount/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package mount @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mount // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/searchable_snapshots/mount/SearchableSnapshotsMountResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/searchable_snapshots/mount/SearchableSnapshotsMountResponse.ts#L22-L26 type Response struct { Snapshot types.MountedSnapshot `json:"snapshot"` } diff --git a/typedapi/searchablesnapshots/stats/response.go b/typedapi/searchablesnapshots/stats/response.go index 95ba71e979..491d7df36a 100644 --- a/typedapi/searchablesnapshots/stats/response.go +++ b/typedapi/searchablesnapshots/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/searchable_snapshots/stats/SearchableSnapshotsStatsResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/searchable_snapshots/stats/SearchableSnapshotsStatsResponse.ts#L22-L27 type Response struct { Stats json.RawMessage `json:"stats,omitempty"` Total json.RawMessage `json:"total,omitempty"` diff --git a/typedapi/searchablesnapshots/stats/stats.go b/typedapi/searchablesnapshots/stats/stats.go index f4f4f7c653..2e7f50eeaa 100644 --- a/typedapi/searchablesnapshots/stats/stats.go +++ b/typedapi/searchablesnapshots/stats/stats.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieve shard-level statistics about searchable snapshots. +// Get searchable snapshot statistics. package stats import ( @@ -75,9 +75,9 @@ func NewStatsFunc(tp elastictransport.Interface) NewStats { } } -// Retrieve shard-level statistics about searchable snapshots. +// Get searchable snapshot statistics. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-stats.html func New(tp elastictransport.Interface) *Stats { r := &Stats{ transport: tp, @@ -296,7 +296,8 @@ func (r *Stats) Header(key, value string) *Stats { return r } -// Index A comma-separated list of index names +// Index A comma-separated list of data streams and indices to retrieve statistics +// for. // API Name: index func (r *Stats) Index(index string) *Stats { r.paramSet |= indexMask diff --git a/typedapi/searchapplication/delete/delete.go b/typedapi/searchapplication/delete/delete.go index e45c803163..b2a9764b09 100644 --- a/typedapi/searchapplication/delete/delete.go +++ b/typedapi/searchapplication/delete/delete.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes a search application. +// Delete a search application. +// +// Remove a search application and its associated alias. Indices attached to the +// search application are not removed. package delete import ( @@ -76,7 +79,10 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } } -// Deletes a search application. +// Delete a search application. +// +// Remove a search application and its associated alias. Indices attached to the +// search application are not removed. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-search-application.html func New(tp elastictransport.Interface) *Delete { @@ -290,7 +296,7 @@ func (r *Delete) Header(key, value string) *Delete { return r } -// Name The name of the search application to delete +// Name The name of the search application to delete. 
// API Name: name func (r *Delete) _name(name string) *Delete { r.paramSet |= nameMask diff --git a/typedapi/searchapplication/delete/response.go b/typedapi/searchapplication/delete/response.go index 6949e6f6a7..c289199ee8 100644 --- a/typedapi/searchapplication/delete/response.go +++ b/typedapi/searchapplication/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/delete/SearchApplicationsDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/delete/SearchApplicationsDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go b/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go index d138fb4ba0..5cb856c53a 100644 --- a/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go +++ b/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete a behavioral analytics collection. 
+// The associated data stream is also deleted. package deletebehavioralanalytics import ( @@ -77,6 +78,7 @@ func NewDeleteBehavioralAnalyticsFunc(tp elastictransport.Interface) NewDeleteBe } // Delete a behavioral analytics collection. +// The associated data stream is also deleted. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-analytics-collection.html func New(tp elastictransport.Interface) *DeleteBehavioralAnalytics { diff --git a/typedapi/searchapplication/deletebehavioralanalytics/response.go b/typedapi/searchapplication/deletebehavioralanalytics/response.go index b04841451d..bf15691381 100644 --- a/typedapi/searchapplication/deletebehavioralanalytics/response.go +++ b/typedapi/searchapplication/deletebehavioralanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletebehavioralanalytics // Response holds the response body struct for the package deletebehavioralanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/delete_behavioral_analytics/BehavioralAnalyticsDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/delete_behavioral_analytics/BehavioralAnalyticsDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/searchapplication/get/get.go b/typedapi/searchapplication/get/get.go index 8397b1f3b5..21beaedf25 100644 --- a/typedapi/searchapplication/get/get.go +++ b/typedapi/searchapplication/get/get.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the details about a search application +// Get search application details. package get import ( @@ -76,7 +76,7 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } } -// Returns the details about a search application +// Get search application details. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-search-application.html func New(tp elastictransport.Interface) *Get { diff --git a/typedapi/searchapplication/get/response.go b/typedapi/searchapplication/get/response.go index a24ef61673..f44dbcd70e 100644 --- a/typedapi/searchapplication/get/response.go +++ b/typedapi/searchapplication/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package get @@ -26,14 +26,14 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/get/SearchApplicationsGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/get/SearchApplicationsGetResponse.ts#L22-L24 type Response struct { // AnalyticsCollectionName Analytics collection associated to the Search Application. AnalyticsCollectionName *string `json:"analytics_collection_name,omitempty"` // Indices Indices that are part of the Search Application. Indices []string `json:"indices"` - // Name Search Application name. + // Name Search Application name Name string `json:"name"` // Template Search template to use on search operations. Template *types.SearchApplicationTemplate `json:"template,omitempty"` diff --git a/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go b/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go index 6f66f266fd..44a6e3a558 100644 --- a/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go +++ b/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the existing behavioral analytics collections. +// Get behavioral analytics collections. 
package getbehavioralanalytics import ( @@ -74,7 +74,7 @@ func NewGetBehavioralAnalyticsFunc(tp elastictransport.Interface) NewGetBehavior } } -// Returns the existing behavioral analytics collections. +// Get behavioral analytics collections. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-analytics-collection.html func New(tp elastictransport.Interface) *GetBehavioralAnalytics { diff --git a/typedapi/searchapplication/getbehavioralanalytics/response.go b/typedapi/searchapplication/getbehavioralanalytics/response.go index 9ebc10090b..172643f87e 100644 --- a/typedapi/searchapplication/getbehavioralanalytics/response.go +++ b/typedapi/searchapplication/getbehavioralanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getbehavioralanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getbehavioralanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/get_behavioral_analytics/BehavioralAnalyticsGetResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/get_behavioral_analytics/BehavioralAnalyticsGetResponse.ts#L24-L27 type Response map[string]types.AnalyticsCollection diff --git a/typedapi/searchapplication/list/list.go b/typedapi/searchapplication/list/list.go index 06dbfe3b8f..53aaca454a 100644 --- a/typedapi/searchapplication/list/list.go +++ b/typedapi/searchapplication/list/list.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the existing search applications. +// Get search applications. +// Get information about search applications. package list import ( @@ -68,9 +69,10 @@ func NewListFunc(tp elastictransport.Interface) NewList { } } -// Returns the existing search applications. +// Get search applications. +// Get information about search applications. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/list-search-applications.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/list-search-applications.html func New(tp elastictransport.Interface) *List { r := &List{ transport: tp, diff --git a/typedapi/searchapplication/list/response.go b/typedapi/searchapplication/list/response.go index ef70eade23..f266e3fb72 100644 --- a/typedapi/searchapplication/list/response.go +++ b/typedapi/searchapplication/list/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package list @@ -26,10 +26,10 @@ import ( // Response holds the response body struct for the package list // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/list/SearchApplicationsListResponse.ts#L24-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/list/SearchApplicationsListResponse.ts#L23-L28 type Response struct { - Count int64 `json:"count"` - Results []types.SearchApplicationListItem `json:"results"` + Count int64 `json:"count"` + Results []types.SearchApplication `json:"results"` } // NewResponse returns a Response diff --git a/typedapi/searchapplication/postbehavioralanalyticsevent/post_behavioral_analytics_event.go b/typedapi/searchapplication/postbehavioralanalyticsevent/post_behavioral_analytics_event.go new file mode 100644 index 0000000000..05651f6977 --- /dev/null +++ b/typedapi/searchapplication/postbehavioralanalyticsevent/post_behavioral_analytics_event.go @@ -0,0 +1,431 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Create a behavioral analytics collection event. +package postbehavioralanalyticsevent + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + collectionnameMask = iota + 1 + + eventtypeMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PostBehavioralAnalyticsEvent struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req any + deferred []func(request any) error + buf *gobytes.Buffer + + paramSet int + + collectionname string + eventtype string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPostBehavioralAnalyticsEvent type alias for index. +type NewPostBehavioralAnalyticsEvent func(collectionname, eventtype string) *PostBehavioralAnalyticsEvent + +// NewPostBehavioralAnalyticsEventFunc returns a new instance of PostBehavioralAnalyticsEvent with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPostBehavioralAnalyticsEventFunc(tp elastictransport.Interface) NewPostBehavioralAnalyticsEvent { + return func(collectionname, eventtype string) *PostBehavioralAnalyticsEvent { + n := New(tp) + + n._collectionname(collectionname) + + n._eventtype(eventtype) + + return n + } +} + +// Create a behavioral analytics collection event. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/post-analytics-collection-event.html +func New(tp elastictransport.Interface) *PostBehavioralAnalyticsEvent { + r := &PostBehavioralAnalyticsEvent{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PostBehavioralAnalyticsEvent) Raw(raw io.Reader) *PostBehavioralAnalyticsEvent { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PostBehavioralAnalyticsEvent) Request(req any) *PostBehavioralAnalyticsEvent { + r.req = req + + return r +} + +// Payload allows to set the request property with the appropriate payload. +func (r *PostBehavioralAnalyticsEvent) Payload(payload any) *PostBehavioralAnalyticsEvent { + r.req = payload + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PostBehavioralAnalyticsEvent) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PostBehavioralAnalyticsEvent: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == collectionnameMask|eventtypeMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "collectionname", r.collectionname) + } + path.WriteString(r.collectionname) + path.WriteString("/") + path.WriteString("event") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "eventtype", r.eventtype) + } + path.WriteString(r.eventtype) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not 
build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PostBehavioralAnalyticsEvent) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_application.post_behavioral_analytics_event") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.post_behavioral_analytics_event") + if reader := instrument.RecordRequestBody(ctx, "search_application.post_behavioral_analytics_event", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.post_behavioral_analytics_event") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PostBehavioralAnalyticsEvent query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a postbehavioralanalyticsevent.Response +func (r PostBehavioralAnalyticsEvent) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, 
"search_application.post_behavioral_analytics_event") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + if res.StatusCode == 404 { + data, err := io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PostBehavioralAnalyticsEvent headers map. +func (r *PostBehavioralAnalyticsEvent) Header(key, value string) *PostBehavioralAnalyticsEvent { + r.headers.Set(key, value) + + return r +} + +// CollectionName The name of the behavioral analytics collection. +// API Name: collectionname +func (r *PostBehavioralAnalyticsEvent) _collectionname(collectionname string) *PostBehavioralAnalyticsEvent { + r.paramSet |= collectionnameMask + r.collectionname = collectionname + + return r +} + +// EventType The analytics event type. +// API Name: eventtype +func (r *PostBehavioralAnalyticsEvent) _eventtype(eventtype string) *PostBehavioralAnalyticsEvent { + r.paramSet |= eventtypeMask + r.eventtype = eventtype + + return r +} + +// Debug Whether the response type has to include more details +// API name: debug +func (r *PostBehavioralAnalyticsEvent) Debug(debug bool) *PostBehavioralAnalyticsEvent { + r.values.Set("debug", strconv.FormatBool(debug)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PostBehavioralAnalyticsEvent) ErrorTrace(errortrace bool) *PostBehavioralAnalyticsEvent { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PostBehavioralAnalyticsEvent) FilterPath(filterpaths ...string) *PostBehavioralAnalyticsEvent { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PostBehavioralAnalyticsEvent) Human(human bool) *PostBehavioralAnalyticsEvent { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PostBehavioralAnalyticsEvent) Pretty(pretty bool) *PostBehavioralAnalyticsEvent { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/searchapplication/postbehavioralanalyticsevent/request.go b/typedapi/searchapplication/postbehavioralanalyticsevent/request.go new file mode 100644 index 0000000000..5adbad58d7 --- /dev/null +++ b/typedapi/searchapplication/postbehavioralanalyticsevent/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package postbehavioralanalyticsevent + +import ( + "encoding/json" +) + +// Request holds the request body struct for the package postbehavioralanalyticsevent +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/post_behavioral_analytics_event/BehavioralAnalyticsEventPostRequest.ts#L24-L57 +type Request = json.RawMessage + +// NewRequest returns a Request +func NewRequest() *Request { + r := new(json.RawMessage) + + return r +} diff --git a/typedapi/searchapplication/postbehavioralanalyticsevent/response.go b/typedapi/searchapplication/postbehavioralanalyticsevent/response.go new file mode 100644 index 0000000000..62ef7f9e13 --- /dev/null +++ b/typedapi/searchapplication/postbehavioralanalyticsevent/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package postbehavioralanalyticsevent + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package postbehavioralanalyticsevent +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/post_behavioral_analytics_event/BehavioralAnalyticsEventPostResponse.ts#L22-L47 +type Response struct { + Accepted bool `json:"accepted"` + Event json.RawMessage `json:"event,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/searchapplication/put/put.go b/typedapi/searchapplication/put/put.go index 965afb760e..b492b749ad 100644 --- a/typedapi/searchapplication/put/put.go +++ b/typedapi/searchapplication/put/put.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates or updates a search application. +// Create or update a search application. package put import ( @@ -81,7 +81,7 @@ func NewPutFunc(tp elastictransport.Interface) NewPut { } } -// Creates or updates a search application. +// Create or update a search application. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-search-application.html func New(tp elastictransport.Interface) *Put { @@ -91,8 +91,6 @@ func New(tp elastictransport.Interface) *Put { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -368,43 +366,43 @@ func (r *Put) Pretty(pretty bool) *Put { return r } -// AnalyticsCollectionName Analytics collection associated to the Search Application. +// Analytics collection associated to the Search Application. // API name: analytics_collection_name func (r *Put) AnalyticsCollectionName(name string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.AnalyticsCollectionName = &name return r } -// Indices Indices that are part of the Search Application. +// Indices that are part of the Search Application. // API name: indices func (r *Put) Indices(indices ...string) *Put { - r.req.Indices = indices - - return r -} + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range indices { -// Name Search Application name. -// API name: name -func (r *Put) Name(name string) *Put { - r.req.Name = name + r.req.Indices = append(r.req.Indices, v) + } return r } -// Template Search template to use on search operations. +// Search template to use on search operations. // API name: template -func (r *Put) Template(template *types.SearchApplicationTemplate) *Put { - - r.req.Template = template - - return r -} +func (r *Put) Template(template types.SearchApplicationTemplateVariant) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } -// UpdatedAtMillis Last time the Search Application was updated. 
-// API name: updated_at_millis -func (r *Put) UpdatedAtMillis(epochtimeunitmillis int64) *Put { - r.req.UpdatedAtMillis = epochtimeunitmillis + r.req.Template = template.SearchApplicationTemplateCaster() return r } diff --git a/typedapi/searchapplication/put/request.go b/typedapi/searchapplication/put/request.go index c242c87908..d8c386f774 100644 --- a/typedapi/searchapplication/put/request.go +++ b/typedapi/searchapplication/put/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package put @@ -26,12 +26,12 @@ import ( // Request holds the request body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/put/SearchApplicationsPutRequest.ts#L23-L48 -type Request = types.SearchApplication +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/put/SearchApplicationsPutRequest.ts#L23-L57 +type Request = types.SearchApplicationParameters // NewRequest returns a Request func NewRequest() *Request { - r := types.NewSearchApplication() + r := types.NewSearchApplicationParameters() return r } diff --git a/typedapi/searchapplication/put/response.go b/typedapi/searchapplication/put/response.go index 86a1522dd9..7551466caa 100644 --- a/typedapi/searchapplication/put/response.go +++ b/typedapi/searchapplication/put/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package put @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/put/SearchApplicationsPutResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/put/SearchApplicationsPutResponse.ts#L22-L26 type Response struct { Result result.Result `json:"result"` } diff --git a/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go b/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go index 1e00b4a752..4dc6753598 100644 --- a/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go +++ b/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a behavioral analytics collection. +// Create a behavioral analytics collection. package putbehavioralanalytics import ( @@ -76,7 +76,7 @@ func NewPutBehavioralAnalyticsFunc(tp elastictransport.Interface) NewPutBehavior } } -// Creates a behavioral analytics collection. +// Create a behavioral analytics collection. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-analytics-collection.html func New(tp elastictransport.Interface) *PutBehavioralAnalytics { diff --git a/typedapi/searchapplication/putbehavioralanalytics/response.go b/typedapi/searchapplication/putbehavioralanalytics/response.go index f2620be24b..a8126508d0 100644 --- a/typedapi/searchapplication/putbehavioralanalytics/response.go +++ b/typedapi/searchapplication/putbehavioralanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putbehavioralanalytics // Response holds the response body struct for the package putbehavioralanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/put_behavioral_analytics/BehavioralAnalyticsPutResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/put_behavioral_analytics/BehavioralAnalyticsPutResponse.ts#L23-L25 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/searchapplication/renderquery/render_query.go b/typedapi/searchapplication/renderquery/render_query.go new file mode 100644 index 0000000000..f21aee6f8e --- /dev/null +++ b/typedapi/searchapplication/renderquery/render_query.go @@ -0,0 +1,409 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Render a search application query. +// Generate an Elasticsearch query using the specified query parameters and the +// search template associated with the search application or a default template +// if none is specified. +// If a parameter used in the search template is not specified in `params`, the +// parameter's default value will be used. +// The API returns the specific Elasticsearch query that would be generated and +// run by calling the search application search API. +// +// You must have `read` privileges on the backing alias of the search +// application. +package renderquery + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RenderQuery struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRenderQuery type alias for index. +type NewRenderQuery func(name string) *RenderQuery + +// NewRenderQueryFunc returns a new instance of RenderQuery with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRenderQueryFunc(tp elastictransport.Interface) NewRenderQuery { + return func(name string) *RenderQuery { + n := New(tp) + + n._name(name) + + return n + } +} + +// Render a search application query. +// Generate an Elasticsearch query using the specified query parameters and the +// search template associated with the search application or a default template +// if none is specified. +// If a parameter used in the search template is not specified in `params`, the +// parameter's default value will be used. +// The API returns the specific Elasticsearch query that would be generated and +// run by calling the search application search API. +// +// You must have `read` privileges on the backing alias of the search +// application. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-application-render-query.html +func New(tp elastictransport.Interface) *RenderQuery { + r := &RenderQuery{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *RenderQuery) Raw(raw io.Reader) *RenderQuery { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *RenderQuery) Request(req *Request) *RenderQuery { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *RenderQuery) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for RenderQuery: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_render_query") 
+ + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r RenderQuery) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_application.render_query") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.render_query") + if reader := instrument.RecordRequestBody(ctx, "search_application.render_query", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.render_query") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the 
RenderQuery query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a renderquery.Response +func (r RenderQuery) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.render_query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the RenderQuery headers map. +func (r *RenderQuery) Header(key, value string) *RenderQuery { + r.headers.Set(key, value) + + return r +} + +// Name The name of the search application to render teh query for. 
+// API Name: name +func (r *RenderQuery) _name(name string) *RenderQuery { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *RenderQuery) ErrorTrace(errortrace bool) *RenderQuery { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *RenderQuery) FilterPath(filterpaths ...string) *RenderQuery { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *RenderQuery) Human(human bool) *RenderQuery { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *RenderQuery) Pretty(pretty bool) *RenderQuery { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: params +func (r *RenderQuery) Params(params map[string]json.RawMessage) *RenderQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Params = params + return r +} + +func (r *RenderQuery) AddParam(key string, value json.RawMessage) *RenderQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Params == nil { + r.req.Params = make(map[string]json.RawMessage) + } else { + tmp = r.req.Params + } + + tmp[key] = value + + r.req.Params = tmp + return r +} diff --git a/typedapi/searchapplication/renderquery/request.go b/typedapi/searchapplication/renderquery/request.go new file mode 100644 index 0000000000..d86285db6f --- /dev/null +++ b/typedapi/searchapplication/renderquery/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package renderquery + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package renderquery +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/render_query/SearchApplicationsRenderQueryRequest.ts#L24-L54 +type Request struct { + Params map[string]json.RawMessage `json:"params,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Params: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Renderquery request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/searchapplication/renderquery/response.go b/typedapi/searchapplication/renderquery/response.go new file mode 100644 index 0000000000..7ad1b4a7a3 --- /dev/null +++ b/typedapi/searchapplication/renderquery/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package renderquery + +// Response holds the response body struct for the package renderquery +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/render_query/SearchApplicationsRenderQueryResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/searchapplication/search/request.go b/typedapi/searchapplication/search/request.go index af57816981..e6c8e8faef 100644 --- a/typedapi/searchapplication/search/request.go +++ b/typedapi/searchapplication/search/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package search @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/search/SearchApplicationsSearchRequest.ts#L24-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/search/SearchApplicationsSearchRequest.ts#L24-L61 type Request struct { // Params Query parameters specific to this request, which will override any defaults diff --git a/typedapi/searchapplication/search/response.go b/typedapi/searchapplication/search/response.go index e5c45808d5..21f0617f1a 100644 --- a/typedapi/searchapplication/search/response.go +++ b/typedapi/searchapplication/search/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package search @@ -34,22 +34,46 @@ import ( // Response holds the response body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/search/SearchApplicationsSearchResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/search/SearchApplicationsSearchResponse.ts#L22-L24 type Response struct { - Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` - Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Hits types.HitsMetadata `json:"hits"` - MaxScore *types.Float64 `json:"max_score,omitempty"` - NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` - PitId *string `json:"pit_id,omitempty"` - Profile *types.Profile `json:"profile,omitempty"` - ScrollId_ *string `json:"_scroll_id,omitempty"` + Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` + Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Hits The returned documents and metadata. + Hits types.HitsMetadata `json:"hits"` + MaxScore *types.Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *types.Profile `json:"profile,omitempty"` + // ScrollId_ The identifier for the search and its search context. + // You can use this scroll ID with the scroll API to retrieve the next batch of + // search results for the request. 
+ // This property is returned only if the `scroll` query parameter is specified + // in the request. + ScrollId_ *string `json:"_scroll_id,omitempty"` + // Shards_ A count of shards used for the request. Shards_ types.ShardStatistics `json:"_shards"` Suggest map[string][]types.Suggest `json:"suggest,omitempty"` TerminatedEarly *bool `json:"terminated_early,omitempty"` - TimedOut bool `json:"timed_out"` - Took int64 `json:"took"` + // TimedOut If `true`, the request timed out before completion; returned results may be + // partial or empty. + TimedOut bool `json:"timed_out"` + // Took The number of milliseconds it took Elasticsearch to run the request. + // This value is calculated by measuring the time elapsed between receipt of a + // request on the coordinating node and the time at which the coordinating node + // is ready to send the response. + // It includes: + // + // * Communication time between the coordinating node and data nodes + // * Time the request spends in the search thread pool, queued for execution + // * Actual run time + // + // It does not include: + // + // * Time needed to send the request to Elasticsearch + // * Time needed to serialize the JSON response + // * Time needed to send the response to a client + Took int64 `json:"took"` } // NewResponse returns a Response @@ -504,6 +528,13 @@ func (s *Response) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := types.NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := types.NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { diff --git a/typedapi/searchapplication/search/search.go b/typedapi/searchapplication/search/search.go index 9f00792366..8788ee1ae3 100644 --- a/typedapi/searchapplication/search/search.go +++ b/typedapi/searchapplication/search/search.go @@ -16,9 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Perform a search against a search application. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Run a search application search. +// Generate and run an Elasticsearch query that uses the specified query +// parameteter and the search template associated with the search application or +// default template. +// Unspecified template parameters are assigned their default values if +// applicable. package search import ( @@ -81,7 +86,12 @@ func NewSearchFunc(tp elastictransport.Interface) NewSearch { } } -// Perform a search against a search application. +// Run a search application search. +// Generate and run an Elasticsearch query that uses the specified query +// parameteter and the search template associated with the search application or +// default template. +// Unspecified template parameters are assigned their default values if +// applicable. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-application-search.html func New(tp elastictransport.Interface) *Search { @@ -91,8 +101,6 @@ func New(tp elastictransport.Interface) *Search { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -372,12 +380,33 @@ func (r *Search) Pretty(pretty bool) *Search { return r } -// Params Query parameters specific to this request, which will override any defaults +// Query parameters specific to this request, which will override any defaults // specified in the template. 
// API name: params func (r *Search) Params(params map[string]json.RawMessage) *Search { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Params = params + return r +} + +func (r *Search) AddParam(key string, value json.RawMessage) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Params == nil { + r.req.Params = make(map[string]json.RawMessage) + } else { + tmp = r.req.Params + } + + tmp[key] = value + r.req.Params = tmp return r } diff --git a/typedapi/security/activateuserprofile/activate_user_profile.go b/typedapi/security/activateuserprofile/activate_user_profile.go index 6dc1fdeb07..26cad367e9 100644 --- a/typedapi/security/activateuserprofile/activate_user_profile.go +++ b/typedapi/security/activateuserprofile/activate_user_profile.go @@ -16,9 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates or updates a user profile on behalf of another user. +// Activate a user profile. +// +// Create or update a user profile on behalf of another user. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// The calling application must have either an `access_token` or a combination +// of `username` and `password` for the user that the profile document is +// intended for. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. 
+// +// This API creates or updates a profile document for end users with information +// that is extracted from the user's authentication object including `username`, +// `full_name,` `roles`, and the authentication realm. +// For example, in the JWT `access_token` case, the profile user's `username` is +// extracted from the JWT token claim pointed to by the `claims.principal` +// setting of the JWT realm that authenticated the token. +// +// When updating a profile document, the API enables the document if it was +// disabled. +// Any updates do not change existing content for either the `labels` or `data` +// fields. package activateuserprofile import ( @@ -74,7 +97,30 @@ func NewActivateUserProfileFunc(tp elastictransport.Interface) NewActivateUserPr } } -// Creates or updates a user profile on behalf of another user. +// Activate a user profile. +// +// Create or update a user profile on behalf of another user. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// The calling application must have either an `access_token` or a combination +// of `username` and `password` for the user that the profile document is +// intended for. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// This API creates or updates a profile document for end users with information +// that is extracted from the user's authentication object including `username`, +// `full_name,` `roles`, and the authentication realm. +// For example, in the JWT `access_token` case, the profile user's `username` is +// extracted from the JWT token claim pointed to by the `claims.principal` +// setting of the JWT realm that authenticated the token. +// +// When updating a profile document, the API enables the document if it was +// disabled. 
+// Any updates do not change existing content for either the `labels` or `data` +// fields. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-activate-user-profile.html func New(tp elastictransport.Interface) *ActivateUserProfile { @@ -84,8 +130,6 @@ func New(tp elastictransport.Interface) *ActivateUserProfile { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -339,31 +383,58 @@ func (r *ActivateUserProfile) Pretty(pretty bool) *ActivateUserProfile { return r } +// The user's Elasticsearch access token or JWT. +// Both `access` and `id` JWT token types are supported and they depend on the +// underlying JWT realm configuration. +// If you specify the `access_token` grant type, this parameter is required. +// It is not valid with other grant types. // API name: access_token func (r *ActivateUserProfile) AccessToken(accesstoken string) *ActivateUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.AccessToken = &accesstoken return r } +// The type of grant. // API name: grant_type func (r *ActivateUserProfile) GrantType(granttype granttype.GrantType) *ActivateUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.GrantType = granttype - return r } +// The user's password. +// If you specify the `password` grant type, this parameter is required. +// It is not valid with other grant types. // API name: password func (r *ActivateUserProfile) Password(password string) *ActivateUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Password = &password return r } +// The username that identifies the user. +// If you specify the `password` grant type, this parameter is required. 
+// It is not valid with other grant types. // API name: username func (r *ActivateUserProfile) Username(username string) *ActivateUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Username = &username diff --git a/typedapi/security/activateuserprofile/request.go b/typedapi/security/activateuserprofile/request.go index 35c2bf0b36..092bf99eaa 100644 --- a/typedapi/security/activateuserprofile/request.go +++ b/typedapi/security/activateuserprofile/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package activateuserprofile @@ -29,12 +29,25 @@ import ( // Request holds the request body struct for the package activateuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/activate_user_profile/Request.ts#L23-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/activate_user_profile/Request.ts#L23-L76 type Request struct { - AccessToken *string `json:"access_token,omitempty"` - GrantType granttype.GrantType `json:"grant_type"` - Password *string `json:"password,omitempty"` - Username *string `json:"username,omitempty"` + + // AccessToken The user's Elasticsearch access token or JWT. + // Both `access` and `id` JWT token types are supported and they depend on the + // underlying JWT realm configuration. + // If you specify the `access_token` grant type, this parameter is required. + // It is not valid with other grant types. + AccessToken *string `json:"access_token,omitempty"` + // GrantType The type of grant. 
+ GrantType granttype.GrantType `json:"grant_type"` + // Password The user's password. + // If you specify the `password` grant type, this parameter is required. + // It is not valid with other grant types. + Password *string `json:"password,omitempty"` + // Username The username that identifies the user. + // If you specify the `password` grant type, this parameter is required. + // It is not valid with other grant types. + Username *string `json:"username,omitempty"` } // NewRequest returns a Request diff --git a/typedapi/security/activateuserprofile/response.go b/typedapi/security/activateuserprofile/response.go index 311674188a..13b600f8fd 100644 --- a/typedapi/security/activateuserprofile/response.go +++ b/typedapi/security/activateuserprofile/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package activateuserprofile @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package activateuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/activate_user_profile/Response.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/activate_user_profile/Response.ts#L22-L24 type Response struct { Data map[string]json.RawMessage `json:"data"` Doc_ types.UserProfileHitMetadata `json:"_doc"` diff --git a/typedapi/security/authenticate/authenticate.go b/typedapi/security/authenticate/authenticate.go index 6716ad93bd..26b0416cef 100644 --- a/typedapi/security/authenticate/authenticate.go +++ b/typedapi/security/authenticate/authenticate.go @@ -16,9 +16,10 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Authenticate a user. +// // Authenticates a user and returns information about the authenticated user. // Include the user information in a [basic auth // header](https://en.wikipedia.org/wiki/Basic_access_authentication). @@ -77,6 +78,7 @@ func NewAuthenticateFunc(tp elastictransport.Interface) NewAuthenticate { } // Authenticate a user. +// // Authenticates a user and returns information about the authenticated user. // Include the user information in a [basic auth // header](https://en.wikipedia.org/wiki/Basic_access_authentication). diff --git a/typedapi/security/authenticate/response.go b/typedapi/security/authenticate/response.go index 62ba7dceef..bd54e337d3 100644 --- a/typedapi/security/authenticate/response.go +++ b/typedapi/security/authenticate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package authenticate @@ -26,19 +26,19 @@ import ( // Response holds the response body struct for the package authenticate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/authenticate/SecurityAuthenticateResponse.ts#L25-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/authenticate/SecurityAuthenticateResponse.ts#L24-L42 type Response struct { - ApiKey *types.ApiKey `json:"api_key,omitempty"` - AuthenticationRealm types.RealmInfo `json:"authentication_realm"` - AuthenticationType string `json:"authentication_type"` - Email *string `json:"email,omitempty"` - Enabled bool `json:"enabled"` - FullName *string `json:"full_name,omitempty"` - LookupRealm types.RealmInfo `json:"lookup_realm"` - Metadata types.Metadata `json:"metadata"` - Roles []string `json:"roles"` - Token *types.AuthenticateToken `json:"token,omitempty"` - Username string `json:"username"` + ApiKey *types.AuthenticateApiKey `json:"api_key,omitempty"` + AuthenticationRealm types.RealmInfo `json:"authentication_realm"` + AuthenticationType string `json:"authentication_type"` + Email *string `json:"email,omitempty"` + Enabled bool `json:"enabled"` + FullName *string `json:"full_name,omitempty"` + LookupRealm types.RealmInfo `json:"lookup_realm"` + Metadata types.Metadata `json:"metadata"` + Roles []string `json:"roles"` + Token *types.AuthenticateToken `json:"token,omitempty"` + Username string `json:"username"` } // NewResponse returns a Response diff --git a/typedapi/security/bulkdeleterole/bulk_delete_role.go b/typedapi/security/bulkdeleterole/bulk_delete_role.go index b8d2e8137c..e6b3409c95 100644 --- 
a/typedapi/security/bulkdeleterole/bulk_delete_role.go +++ b/typedapi/security/bulkdeleterole/bulk_delete_role.go @@ -16,8 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Bulk delete roles. +// // The role management APIs are generally the preferred way to manage roles, // rather than using file-based role management. // The bulk delete roles API cannot delete roles that are defined in roles @@ -77,6 +79,8 @@ func NewBulkDeleteRoleFunc(tp elastictransport.Interface) NewBulkDeleteRole { } } +// Bulk delete roles. +// // The role management APIs are generally the preferred way to manage roles, // rather than using file-based role management. // The bulk delete roles API cannot delete roles that are defined in roles @@ -90,8 +94,6 @@ func New(tp elastictransport.Interface) *BulkDeleteRole { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -353,10 +355,17 @@ func (r *BulkDeleteRole) Pretty(pretty bool) *BulkDeleteRole { return r } -// Names An array of role names to delete +// An array of role names to delete // API name: names func (r *BulkDeleteRole) Names(names ...string) *BulkDeleteRole { - r.req.Names = names + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range names { + + r.req.Names = append(r.req.Names, v) + } return r } diff --git a/typedapi/security/bulkdeleterole/request.go b/typedapi/security/bulkdeleterole/request.go index 0f5b982a7c..46442018a2 100644 --- a/typedapi/security/bulkdeleterole/request.go +++ b/typedapi/security/bulkdeleterole/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package bulkdeleterole @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package bulkdeleterole // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/bulk_delete_role/SecurityBulkDeleteRoleRequest.ts#L23-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/bulk_delete_role/SecurityBulkDeleteRoleRequest.ts#L23-L50 type Request struct { // Names An array of role names to delete diff --git a/typedapi/security/bulkdeleterole/response.go b/typedapi/security/bulkdeleterole/response.go index 3c33989b74..8164b79a66 100644 --- a/typedapi/security/bulkdeleterole/response.go +++ b/typedapi/security/bulkdeleterole/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package bulkdeleterole @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package bulkdeleterole // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/bulk_delete_role/SecurityBulkDeleteRoleResponse.ts#L25-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/bulk_delete_role/SecurityBulkDeleteRoleResponse.ts#L22-L37 type Response struct { // Deleted Array of deleted roles diff --git a/typedapi/security/bulkputrole/bulk_put_role.go b/typedapi/security/bulkputrole/bulk_put_role.go index ba425dc5c2..7477b9cf30 100644 --- a/typedapi/security/bulkputrole/bulk_put_role.go +++ b/typedapi/security/bulkputrole/bulk_put_role.go @@ -16,8 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Bulk create or update roles. +// // The role management APIs are generally the preferred way to manage roles, // rather than using file-based role management. // The bulk create or update roles API cannot update roles that are defined in @@ -77,6 +79,8 @@ func NewBulkPutRoleFunc(tp elastictransport.Interface) NewBulkPutRole { } } +// Bulk create or update roles. +// // The role management APIs are generally the preferred way to manage roles, // rather than using file-based role management. 
// The bulk create or update roles API cannot update roles that are defined in @@ -90,8 +94,6 @@ func New(tp elastictransport.Interface) *BulkPutRole { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -353,11 +355,32 @@ func (r *BulkPutRole) Pretty(pretty bool) *BulkPutRole { return r } -// Roles A dictionary of role name to RoleDescriptor objects to add or update +// A dictionary of role name to RoleDescriptor objects to add or update // API name: roles func (r *BulkPutRole) Roles(roles map[string]types.RoleDescriptor) *BulkPutRole { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Roles = roles + return r +} + +func (r *BulkPutRole) AddRole(key string, value types.RoleDescriptorVariant) *BulkPutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.RoleDescriptor + if r.req.Roles == nil { + r.req.Roles = make(map[string]types.RoleDescriptor) + } else { + tmp = r.req.Roles + } + + tmp[key] = *value.RoleDescriptorCaster() + r.req.Roles = tmp return r } diff --git a/typedapi/security/bulkputrole/request.go b/typedapi/security/bulkputrole/request.go index ac068977de..431b9c61a3 100644 --- a/typedapi/security/bulkputrole/request.go +++ b/typedapi/security/bulkputrole/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package bulkputrole @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package bulkputrole // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/bulk_put_role/SecurityBulkPutRoleRequest.ts#L25-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/bulk_put_role/SecurityBulkPutRoleRequest.ts#L25-L52 type Request struct { // Roles A dictionary of role name to RoleDescriptor objects to add or update diff --git a/typedapi/security/bulkputrole/response.go b/typedapi/security/bulkputrole/response.go index 1c8295aefc..dc9a1cc63b 100644 --- a/typedapi/security/bulkputrole/response.go +++ b/typedapi/security/bulkputrole/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package bulkputrole @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package bulkputrole // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/bulk_put_role/SecurityBulkPutRoleResponse.ts#L22-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/bulk_put_role/SecurityBulkPutRoleResponse.ts#L22-L41 type Response struct { // Created Array of created roles diff --git a/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go b/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go index a17610f5b5..48d4a2e23c 100644 --- a/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go +++ b/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go @@ -16,21 +16,52 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates the attributes of multiple existing API keys. +// Bulk update API keys. +// Update the attributes for multiple API keys. +// +// IMPORTANT: It is not possible to use an API key as the authentication +// credential for this API. To update API keys, the owner user's credentials are +// required. +// +// This API is similar to the update API key API but enables you to apply the +// same update to multiple API keys in one API call. This operation can greatly +// improve performance over making individual updates. +// +// It is not possible to update expired or invalidated API keys. 
+// +// This API supports updates to API key access scope, metadata and expiration. +// The access scope of each API key is derived from the `role_descriptors` you +// specify in the request and a snapshot of the owner user's permissions at the +// time of the request. +// The snapshot of the owner's permissions is updated automatically on every +// call. +// +// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to +// this API might still change an API key's access scope. This change can occur +// if the owner user's permissions have changed since the API key was created or +// last modified. +// +// A successful request returns a JSON structure that contains the IDs of all +// updated API keys, the IDs of API keys that already had the requested changes +// and did not require an update, and error details for any failed update. package bulkupdateapikeys import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -45,6 +76,10 @@ type BulkUpdateApiKeys struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -65,7 +100,34 @@ func NewBulkUpdateApiKeysFunc(tp elastictransport.Interface) NewBulkUpdateApiKey } } -// Updates the attributes of multiple existing API keys. +// Bulk update API keys. +// Update the attributes for multiple API keys. +// +// IMPORTANT: It is not possible to use an API key as the authentication +// credential for this API. To update API keys, the owner user's credentials are +// required. +// +// This API is similar to the update API key API but enables you to apply the +// same update to multiple API keys in one API call. 
This operation can greatly +// improve performance over making individual updates. +// +// It is not possible to update expired or invalidated API keys. +// +// This API supports updates to API key access scope, metadata and expiration. +// The access scope of each API key is derived from the `role_descriptors` you +// specify in the request and a snapshot of the owner user's permissions at the +// time of the request. +// The snapshot of the owner's permissions is updated automatically on every +// call. +// +// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to +// this API might still change an API key's access scope. This change can occur +// if the owner user's permissions have changed since the API key was created or +// last modified. +// +// A successful request returns a JSON structure that contains the IDs of all +// updated API keys, the IDs of API keys that already had the requested changes +// and did not require an update, and error details for any failed update. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-update-api-keys.html func New(tp elastictransport.Interface) *BulkUpdateApiKeys { @@ -73,6 +135,8 @@ func New(tp elastictransport.Interface) *BulkUpdateApiKeys { transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -84,6 +148,21 @@ func New(tp elastictransport.Interface) *BulkUpdateApiKeys { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *BulkUpdateApiKeys) Raw(raw io.Reader) *BulkUpdateApiKeys { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
+func (r *BulkUpdateApiKeys) Request(req *Request) *BulkUpdateApiKeys { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. func (r *BulkUpdateApiKeys) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -93,6 +172,31 @@ func (r *BulkUpdateApiKeys) HttpRequest(ctx context.Context) (*http.Request, err var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for BulkUpdateApiKeys: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -182,13 +286,7 @@ func (r BulkUpdateApiKeys) Perform(providedCtx context.Context) (*http.Response, } // Do runs the request through the transport, handle the response and returns a bulkupdateapikeys.Response -func (r BulkUpdateApiKeys) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r BulkUpdateApiKeys) IsSuccess(providedCtx context.Context) (bool, error) { +func (r BulkUpdateApiKeys) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -199,30 +297,46 @@ func (r BulkUpdateApiKeys) IsSuccess(providedCtx context.Context) (bool, error) ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the BulkUpdateApiKeys query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode } - return false, nil + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the BulkUpdateApiKeys headers map. 
@@ -231,3 +345,133 @@ func (r *BulkUpdateApiKeys) Header(key, value string) *BulkUpdateApiKeys { return r } + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *BulkUpdateApiKeys) ErrorTrace(errortrace bool) *BulkUpdateApiKeys { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *BulkUpdateApiKeys) FilterPath(filterpaths ...string) *BulkUpdateApiKeys { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *BulkUpdateApiKeys) Human(human bool) *BulkUpdateApiKeys { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *BulkUpdateApiKeys) Pretty(pretty bool) *BulkUpdateApiKeys { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Expiration time for the API keys. +// By default, API keys never expire. +// This property can be omitted to leave the value unchanged. 
+// API name: expiration +func (r *BulkUpdateApiKeys) Expiration(duration types.DurationVariant) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() + + return r +} + +// The API key identifiers. +// API name: ids +func (r *BulkUpdateApiKeys) Ids(ids ...string) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Ids = make([]string, len(ids)) + r.req.Ids = ids + + return r +} + +// Arbitrary nested metadata to associate with the API keys. +// Within the `metadata` object, top-level keys beginning with an underscore +// (`_`) are reserved for system usage. +// Any information specified with this parameter fully replaces metadata +// previously associated with the API key. +// API name: metadata +func (r *BulkUpdateApiKeys) Metadata(metadata types.MetadataVariant) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// The role descriptors to assign to the API keys. +// An API key's effective permissions are an intersection of its assigned +// privileges and the point-in-time snapshot of permissions of the owner user. +// You can assign new privileges by specifying them in this parameter. +// To remove assigned privileges, supply the `role_descriptors` parameter as an +// empty object `{}`. +// If an API key has no assigned privileges, it inherits the owner user's full +// permissions. +// The snapshot of the owner's permissions is always updated, whether you supply +// the `role_descriptors` parameter. +// The structure of a role descriptor is the same as the request for the create +// API keys API. 
+// API name: role_descriptors +func (r *BulkUpdateApiKeys) RoleDescriptors(roledescriptors map[string]types.RoleDescriptor) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RoleDescriptors = roledescriptors + return r +} + +func (r *BulkUpdateApiKeys) AddRoleDescriptor(key string, value types.RoleDescriptorVariant) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.RoleDescriptor + if r.req.RoleDescriptors == nil { + r.req.RoleDescriptors = make(map[string]types.RoleDescriptor) + } else { + tmp = r.req.RoleDescriptors + } + + tmp[key] = *value.RoleDescriptorCaster() + + r.req.RoleDescriptors = tmp + return r +} diff --git a/typedapi/security/bulkupdateapikeys/request.go b/typedapi/security/bulkupdateapikeys/request.go new file mode 100644 index 0000000000..3df09abcb5 --- /dev/null +++ b/typedapi/security/bulkupdateapikeys/request.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package bulkupdateapikeys + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package bulkupdateapikeys +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/bulk_update_api_keys/SecurityBulkUpdateApiKeysRequest.ts#L26-L83 +type Request struct { + + // Expiration Expiration time for the API keys. + // By default, API keys never expire. + // This property can be omitted to leave the value unchanged. + Expiration types.Duration `json:"expiration,omitempty"` + // Ids The API key identifiers. + Ids []string `json:"ids"` + // Metadata Arbitrary nested metadata to associate with the API keys. + // Within the `metadata` object, top-level keys beginning with an underscore + // (`_`) are reserved for system usage. + // Any information specified with this parameter fully replaces metadata + // previously associated with the API key. + Metadata types.Metadata `json:"metadata,omitempty"` + // RoleDescriptors The role descriptors to assign to the API keys. + // An API key's effective permissions are an intersection of its assigned + // privileges and the point-in-time snapshot of permissions of the owner user. + // You can assign new privileges by specifying them in this parameter. + // To remove assigned privileges, supply the `role_descriptors` parameter as an + // empty object `{}`. + // If an API key has no assigned privileges, it inherits the owner user's full + // permissions. + // The snapshot of the owner's permissions is always updated, whether you supply + // the `role_descriptors` parameter. + // The structure of a role descriptor is the same as the request for the create + // API keys API. 
+ RoleDescriptors map[string]types.RoleDescriptor `json:"role_descriptors,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + RoleDescriptors: make(map[string]types.RoleDescriptor, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Bulkupdateapikeys request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expiration": + if err := dec.Decode(&s.Expiration); err != nil { + return fmt.Errorf("%s | %w", "Expiration", err) + } + + case "ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + + s.Ids = append(s.Ids, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Ids); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "role_descriptors": + if s.RoleDescriptors == nil { + s.RoleDescriptors = make(map[string]types.RoleDescriptor, 0) + } + if err := dec.Decode(&s.RoleDescriptors); err != nil { + return fmt.Errorf("%s | %w", "RoleDescriptors", err) + } + + } + } + return nil +} diff --git a/typedapi/security/bulkupdateapikeys/response.go b/typedapi/security/bulkupdateapikeys/response.go new file mode 100644 index 0000000000..59e24474de --- /dev/null +++ 
b/typedapi/security/bulkupdateapikeys/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package bulkupdateapikeys + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package bulkupdateapikeys +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/bulk_update_api_keys/SecurityBulkUpdateApiKeysResponse.ts#L22-L28 +type Response struct { + Errors *types.BulkError `json:"errors,omitempty"` + Noops []string `json:"noops"` + Updated []string `json:"updated"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/changepassword/change_password.go b/typedapi/security/changepassword/change_password.go index 8817f8cb66..b335e0074d 100644 --- a/typedapi/security/changepassword/change_password.go +++ b/typedapi/security/changepassword/change_password.go @@ -16,9 +16,11 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Changes the passwords of users in the native realm and built-in users. +// Change passwords. +// +// Change the passwords of users in the native realm and built-in users. package changepassword import ( @@ -80,7 +82,9 @@ func NewChangePasswordFunc(tp elastictransport.Interface) NewChangePassword { } } -// Changes the passwords of users in the native realm and built-in users. +// Change passwords. +// +// Change the passwords of users in the native realm and built-in users. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html func New(tp elastictransport.Interface) *ChangePassword { @@ -90,8 +94,6 @@ func New(tp elastictransport.Interface) *ChangePassword { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -380,21 +382,30 @@ func (r *ChangePassword) Pretty(pretty bool) *ChangePassword { return r } -// Password The new password value. Passwords must be at least 6 characters long. +// The new password value. Passwords must be at least 6 characters long. // API name: password func (r *ChangePassword) Password(password string) *ChangePassword { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Password = &password return r } -// PasswordHash A hash of the new password value. This must be produced using the same +// A hash of the new password value. This must be produced using the same // hashing algorithm as has been configured for password storage. For more // details, // see the explanation of the `xpack.security.authc.password_hashing.algorithm` // setting. 
// API name: password_hash func (r *ChangePassword) PasswordHash(passwordhash string) *ChangePassword { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.PasswordHash = &passwordhash diff --git a/typedapi/security/changepassword/request.go b/typedapi/security/changepassword/request.go index b7547d61b0..6d7d355a90 100644 --- a/typedapi/security/changepassword/request.go +++ b/typedapi/security/changepassword/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package changepassword @@ -31,7 +31,7 @@ import ( // Request holds the request body struct for the package changepassword // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/change_password/SecurityChangePasswordRequest.ts#L23-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/change_password/SecurityChangePasswordRequest.ts#L23-L65 type Request struct { // Password The new password value. Passwords must be at least 6 characters long. diff --git a/typedapi/security/changepassword/response.go b/typedapi/security/changepassword/response.go index 4cd912a837..ffff967e04 100644 --- a/typedapi/security/changepassword/response.go +++ b/typedapi/security/changepassword/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package changepassword // Response holds the response body struct for the package changepassword // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/change_password/SecurityChangePasswordResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/change_password/SecurityChangePasswordResponse.ts#L20-L22 type Response struct { } diff --git a/typedapi/security/clearapikeycache/clear_api_key_cache.go b/typedapi/security/clearapikeycache/clear_api_key_cache.go index fb0c6ac142..a693d47f5d 100644 --- a/typedapi/security/clearapikeycache/clear_api_key_cache.go +++ b/typedapi/security/clearapikeycache/clear_api_key_cache.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Evicts a subset of all entries from the API key cache. +// Clear the API key cache. +// +// Evict a subset of all entries from the API key cache. // The cache is also automatically cleared on state changes of the security // index. package clearapikeycache @@ -78,7 +80,9 @@ func NewClearApiKeyCacheFunc(tp elastictransport.Interface) NewClearApiKeyCache } } -// Evicts a subset of all entries from the API key cache. +// Clear the API key cache. +// +// Evict a subset of all entries from the API key cache. // The cache is also automatically cleared on state changes of the security // index. 
// diff --git a/typedapi/security/clearapikeycache/response.go b/typedapi/security/clearapikeycache/response.go index bdb2a1a5e6..641d101f3a 100644 --- a/typedapi/security/clearapikeycache/response.go +++ b/typedapi/security/clearapikeycache/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clearapikeycache @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearapikeycache // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/clear_api_key_cache/SecurityClearApiKeyCacheResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/clear_api_key_cache/SecurityClearApiKeyCacheResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` NodeStats types.NodeStatistics `json:"_nodes"` diff --git a/typedapi/security/clearcachedprivileges/clear_cached_privileges.go b/typedapi/security/clearcachedprivileges/clear_cached_privileges.go index 1c8d9c60e9..207228b434 100644 --- a/typedapi/security/clearcachedprivileges/clear_cached_privileges.go +++ b/typedapi/security/clearcachedprivileges/clear_cached_privileges.go @@ -16,9 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Evicts application privileges from the native application privileges cache. +// Clear the privileges cache. 
+//
+// Evict privileges from the native application privilege cache.
+// The cache is also automatically cleared for applications that have their
+// privileges updated.
 package clearcachedprivileges
 
 import (
@@ -76,7 +80,11 @@ func NewClearCachedPrivilegesFunc(tp elastictransport.Interface) NewClearCachedP
 	}
 }
 
-// Evicts application privileges from the native application privileges cache.
+// Clear the privileges cache.
+//
+// Evict privileges from the native application privilege cache.
+// The cache is also automatically cleared for applications that have their
+// privileges updated.
 //
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-privilege-cache.html
 func New(tp elastictransport.Interface) *ClearCachedPrivileges {
@@ -292,7 +300,9 @@ func (r *ClearCachedPrivileges) Header(key, value string) *ClearCachedPrivileges
 	return r
 }
 
-// Application A comma-separated list of application names
+// Application A comma-separated list of applications.
+// To clear all applications, use an asterisk (`*`).
+// It does not support other wildcard patterns.
 // API Name: application
 func (r *ClearCachedPrivileges) _application(application string) *ClearCachedPrivileges {
 	r.paramSet |= applicationMask
diff --git a/typedapi/security/clearcachedprivileges/response.go b/typedapi/security/clearcachedprivileges/response.go
index a964554307..552ae1b8c0 100644
--- a/typedapi/security/clearcachedprivileges/response.go
+++ b/typedapi/security/clearcachedprivileges/response.go
@@ -16,7 +16,7 @@
 // under the License.
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clearcachedprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcachedprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/clear_cached_privileges/SecurityClearCachedPrivilegesResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/clear_cached_privileges/SecurityClearCachedPrivilegesResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` NodeStats types.NodeStatistics `json:"_nodes"` diff --git a/typedapi/security/clearcachedrealms/clear_cached_realms.go b/typedapi/security/clearcachedrealms/clear_cached_realms.go index a26a0d29a3..5c8fd1895c 100644 --- a/typedapi/security/clearcachedrealms/clear_cached_realms.go +++ b/typedapi/security/clearcachedrealms/clear_cached_realms.go @@ -16,10 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Evicts users from the user cache. Can completely clear the cache or evict -// specific users. +// Clear the user cache. +// +// Evict users from the user cache. +// You can completely clear the cache or evict specific users. +// +// User credentials are cached in memory on each node to avoid connecting to a +// remote authentication service or hitting the disk for every incoming request. +// There are realm settings that you can use to configure the user cache. 
+// For more information, refer to the documentation about controlling the user +// cache. package clearcachedrealms import ( @@ -77,8 +85,16 @@ func NewClearCachedRealmsFunc(tp elastictransport.Interface) NewClearCachedRealm } } -// Evicts users from the user cache. Can completely clear the cache or evict -// specific users. +// Clear the user cache. +// +// Evict users from the user cache. +// You can completely clear the cache or evict specific users. +// +// User credentials are cached in memory on each node to avoid connecting to a +// remote authentication service or hitting the disk for every incoming request. +// There are realm settings that you can use to configure the user cache. +// For more information, refer to the documentation about controlling the user +// cache. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-cache.html func New(tp elastictransport.Interface) *ClearCachedRealms { @@ -294,7 +310,9 @@ func (r *ClearCachedRealms) Header(key, value string) *ClearCachedRealms { return r } -// Realms Comma-separated list of realms to clear +// Realms A comma-separated list of realms. +// To clear all realms, use an asterisk (`*`). +// It does not support other wildcard patterns. // API Name: realms func (r *ClearCachedRealms) _realms(realms string) *ClearCachedRealms { r.paramSet |= realmsMask @@ -303,7 +321,9 @@ func (r *ClearCachedRealms) _realms(realms string) *ClearCachedRealms { return r } -// Usernames Comma-separated list of usernames to clear from the cache +// Usernames A comma-separated list of the users to clear from the cache. +// If you do not specify this parameter, the API evicts all users from the user +// cache. 
// API name: usernames func (r *ClearCachedRealms) Usernames(usernames ...string) *ClearCachedRealms { tmp := []string{} diff --git a/typedapi/security/clearcachedrealms/response.go b/typedapi/security/clearcachedrealms/response.go index d09b8b6801..df84690082 100644 --- a/typedapi/security/clearcachedrealms/response.go +++ b/typedapi/security/clearcachedrealms/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clearcachedrealms @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcachedrealms // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/clear_cached_realms/SecurityClearCachedRealmsResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/clear_cached_realms/SecurityClearCachedRealmsResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` NodeStats types.NodeStatistics `json:"_nodes"` diff --git a/typedapi/security/clearcachedroles/clear_cached_roles.go b/typedapi/security/clearcachedroles/clear_cached_roles.go index c4e01c0431..9833bcf443 100644 --- a/typedapi/security/clearcachedroles/clear_cached_roles.go +++ b/typedapi/security/clearcachedroles/clear_cached_roles.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Evicts roles from the native role cache. 
+// Clear the roles cache. +// +// Evict roles from the native role cache. package clearcachedroles import ( @@ -76,7 +78,9 @@ func NewClearCachedRolesFunc(tp elastictransport.Interface) NewClearCachedRoles } } -// Evicts roles from the native role cache. +// Clear the roles cache. +// +// Evict roles from the native role cache. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-role-cache.html func New(tp elastictransport.Interface) *ClearCachedRoles { @@ -292,7 +296,9 @@ func (r *ClearCachedRoles) Header(key, value string) *ClearCachedRoles { return r } -// Name Role name +// Name A comma-separated list of roles to evict from the role cache. +// To evict all roles, use an asterisk (`*`). +// It does not support other wildcard patterns. // API Name: name func (r *ClearCachedRoles) _name(name string) *ClearCachedRoles { r.paramSet |= nameMask diff --git a/typedapi/security/clearcachedroles/response.go b/typedapi/security/clearcachedroles/response.go index 563bb3a5ce..426c3a951c 100644 --- a/typedapi/security/clearcachedroles/response.go +++ b/typedapi/security/clearcachedroles/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clearcachedroles @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcachedroles // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/clear_cached_roles/ClearCachedRolesResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/clear_cached_roles/ClearCachedRolesResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` NodeStats types.NodeStatistics `json:"_nodes"` diff --git a/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go b/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go index ae193e3402..ec993341dd 100644 --- a/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go +++ b/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go @@ -16,9 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Evicts tokens from the service account token caches. +// Clear service account token caches. +// +// Evict a subset of all entries from the service account token caches. +// Two separate caches exist for service account tokens: one cache for tokens +// backed by the `service_tokens` file, and another for tokens backed by the +// `.security` index. +// This API clears matching entries from both caches. 
+// +// The cache for service account tokens backed by the `.security` index is +// cleared automatically on state changes of the security index. +// The cache for tokens backed by the `service_tokens` file is cleared +// automatically on file changes. package clearcachedservicetokens import ( @@ -86,7 +97,18 @@ func NewClearCachedServiceTokensFunc(tp elastictransport.Interface) NewClearCach } } -// Evicts tokens from the service account token caches. +// Clear service account token caches. +// +// Evict a subset of all entries from the service account token caches. +// Two separate caches exist for service account tokens: one cache for tokens +// backed by the `service_tokens` file, and another for tokens backed by the +// `.security` index. +// This API clears matching entries from both caches. +// +// The cache for service account tokens backed by the `.security` index is +// cleared automatically on state changes of the security index. +// The cache for tokens backed by the `service_tokens` file is cleared +// automatically on file changes. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-service-token-caches.html func New(tp elastictransport.Interface) *ClearCachedServiceTokens { @@ -318,7 +340,7 @@ func (r *ClearCachedServiceTokens) Header(key, value string) *ClearCachedService return r } -// Namespace An identifier for the namespace +// Namespace The namespace, which is a top-level grouping of service accounts. // API Name: namespace func (r *ClearCachedServiceTokens) _namespace(namespace string) *ClearCachedServiceTokens { r.paramSet |= namespaceMask @@ -327,7 +349,7 @@ func (r *ClearCachedServiceTokens) _namespace(namespace string) *ClearCachedServ return r } -// Service An identifier for the service name +// Service The name of the service, which must be unique within its namespace. 
// API Name: service func (r *ClearCachedServiceTokens) _service(service string) *ClearCachedServiceTokens { r.paramSet |= serviceMask @@ -336,7 +358,10 @@ func (r *ClearCachedServiceTokens) _service(service string) *ClearCachedServiceT return r } -// Name A comma-separated list of service token names +// Name A comma-separated list of token names to evict from the service account token +// caches. +// Use a wildcard (`*`) to evict all tokens that belong to a service account. +// It does not support other wildcard patterns. // API Name: name func (r *ClearCachedServiceTokens) _name(name string) *ClearCachedServiceTokens { r.paramSet |= nameMask diff --git a/typedapi/security/clearcachedservicetokens/response.go b/typedapi/security/clearcachedservicetokens/response.go index 7f116efca3..fa8c3b6432 100644 --- a/typedapi/security/clearcachedservicetokens/response.go +++ b/typedapi/security/clearcachedservicetokens/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clearcachedservicetokens @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcachedservicetokens // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/clear_cached_service_tokens/ClearCachedServiceTokensResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/clear_cached_service_tokens/ClearCachedServiceTokensResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` NodeStats types.NodeStatistics `json:"_nodes"` diff --git a/typedapi/security/createapikey/create_api_key.go b/typedapi/security/createapikey/create_api_key.go index d51a3dc971..766dfdb814 100644 --- a/typedapi/security/createapikey/create_api_key.go +++ b/typedapi/security/createapikey/create_api_key.go @@ -16,16 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Create an API key. -// Creates an API key for access without requiring basic authentication. +// +// Create an API key for access without requiring basic authentication. +// +// IMPORTANT: If the credential that is used to authenticate this request is an +// API key, the derived API key cannot have any privileges. +// If you specify privileges, the API returns an error. +// // A successful request returns a JSON structure that contains the API key, its // unique id, and its name. 
// If applicable, it also returns expiration information for the API key in // milliseconds. +// // NOTE: By default, API keys never expire. You can specify expiration // information when you create the API keys. +// +// The API keys are created by the Elasticsearch API key service, which is +// automatically enabled. +// To configure or turn off the API key service, refer to API key service +// setting documentation. package createapikey import ( @@ -82,14 +94,26 @@ func NewCreateApiKeyFunc(tp elastictransport.Interface) NewCreateApiKey { } // Create an API key. -// Creates an API key for access without requiring basic authentication. +// +// Create an API key for access without requiring basic authentication. +// +// IMPORTANT: If the credential that is used to authenticate this request is an +// API key, the derived API key cannot have any privileges. +// If you specify privileges, the API returns an error. +// // A successful request returns a JSON structure that contains the API key, its // unique id, and its name. // If applicable, it also returns expiration information for the API key in // milliseconds. +// // NOTE: By default, API keys never expire. You can specify expiration // information when you create the API keys. // +// The API keys are created by the Elasticsearch API key service, which is +// automatically enabled. +// To configure or turn off the API key service, refer to API key service +// setting documentation. 
+// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html func New(tp elastictransport.Interface) *CreateApiKey { r := &CreateApiKey{ @@ -98,8 +122,6 @@ func New(tp elastictransport.Interface) *CreateApiKey { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -361,44 +383,90 @@ func (r *CreateApiKey) Pretty(pretty bool) *CreateApiKey { return r } -// Expiration Expiration time for the API key. By default, API keys never expire. +// The expiration time for the API key. +// By default, API keys never expire. // API name: expiration -func (r *CreateApiKey) Expiration(duration types.Duration) *CreateApiKey { - r.req.Expiration = duration +func (r *CreateApiKey) Expiration(duration types.DurationVariant) *CreateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() return r } -// Metadata Arbitrary metadata that you want to associate with the API key. It supports +// Arbitrary metadata that you want to associate with the API key. It supports // nested data structure. Within the metadata object, keys beginning with `_` // are reserved for system usage. // API name: metadata -func (r *CreateApiKey) Metadata(metadata types.Metadata) *CreateApiKey { - r.req.Metadata = metadata +func (r *CreateApiKey) Metadata(metadata types.MetadataVariant) *CreateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } -// Name Specifies the name for this API key. +// A name for the API key. 
// API name: name func (r *CreateApiKey) Name(name string) *CreateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Name = &name return r } -// RoleDescriptors An array of role descriptors for this API key. This parameter is optional. -// When it is not specified or is an empty array, then the API key will have a -// point in time snapshot of permissions of the authenticated user. If you -// supply role descriptors then the resultant permissions would be an -// intersection of API keys permissions and authenticated user’s permissions -// thereby limiting the access scope for API keys. The structure of role -// descriptor is the same as the request for create role API. For more details, -// see create or update roles API. +// An array of role descriptors for this API key. +// When it is not specified or it is an empty array, the API key will have a +// point in time snapshot of permissions of the authenticated user. +// If you supply role descriptors, the resultant permissions are an intersection +// of API keys permissions and the authenticated user's permissions thereby +// limiting the access scope for API keys. +// The structure of role descriptor is the same as the request for the create +// role API. +// For more details, refer to the create or update roles API. +// +// NOTE: Due to the way in which this permission intersection is calculated, it +// is not possible to create an API key that is a child of another API key, +// unless the derived key is created without any privileges. +// In this case, you must explicitly specify a role descriptor with no +// privileges. +// The derived API key can be used for authentication; it will not have +// authority to call Elasticsearch APIs. 
// API name: role_descriptors func (r *CreateApiKey) RoleDescriptors(roledescriptors map[string]types.RoleDescriptor) *CreateApiKey { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RoleDescriptors = roledescriptors + return r +} + +func (r *CreateApiKey) AddRoleDescriptor(key string, value types.RoleDescriptorVariant) *CreateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.RoleDescriptor + if r.req.RoleDescriptors == nil { + r.req.RoleDescriptors = make(map[string]types.RoleDescriptor) + } else { + tmp = r.req.RoleDescriptors + } + + tmp[key] = *value.RoleDescriptorCaster() + r.req.RoleDescriptors = tmp return r } diff --git a/typedapi/security/createapikey/request.go b/typedapi/security/createapikey/request.go index 03393827d8..36ab7f7a43 100644 --- a/typedapi/security/createapikey/request.go +++ b/typedapi/security/createapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package createapikey @@ -32,25 +32,35 @@ import ( // Request holds the request body struct for the package createapikey // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/create_api_key/SecurityCreateApiKeyRequest.ts#L26-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/create_api_key/SecurityCreateApiKeyRequest.ts#L26-L86 type Request struct { - // Expiration Expiration time for the API key. By default, API keys never expire. + // Expiration The expiration time for the API key. 
+ // By default, API keys never expire. Expiration types.Duration `json:"expiration,omitempty"` // Metadata Arbitrary metadata that you want to associate with the API key. It supports // nested data structure. Within the metadata object, keys beginning with `_` // are reserved for system usage. Metadata types.Metadata `json:"metadata,omitempty"` - // Name Specifies the name for this API key. + // Name A name for the API key. Name *string `json:"name,omitempty"` - // RoleDescriptors An array of role descriptors for this API key. This parameter is optional. - // When it is not specified or is an empty array, then the API key will have a - // point in time snapshot of permissions of the authenticated user. If you - // supply role descriptors then the resultant permissions would be an - // intersection of API keys permissions and authenticated user’s permissions - // thereby limiting the access scope for API keys. The structure of role - // descriptor is the same as the request for create role API. For more details, - // see create or update roles API. + // RoleDescriptors An array of role descriptors for this API key. + // When it is not specified or it is an empty array, the API key will have a + // point in time snapshot of permissions of the authenticated user. + // If you supply role descriptors, the resultant permissions are an intersection + // of API keys permissions and the authenticated user's permissions thereby + // limiting the access scope for API keys. + // The structure of role descriptor is the same as the request for the create + // role API. + // For more details, refer to the create or update roles API. + // + // NOTE: Due to the way in which this permission intersection is calculated, it + // is not possible to create an API key that is a child of another API key, + // unless the derived key is created without any privileges. + // In this case, you must explicitly specify a role descriptor with no + // privileges. 
+ // The derived API key can be used for authentication; it will not have + // authority to call Elasticsearch APIs. RoleDescriptors map[string]types.RoleDescriptor `json:"role_descriptors,omitempty"` } diff --git a/typedapi/security/createapikey/response.go b/typedapi/security/createapikey/response.go index 50e0831f12..752846c3ff 100644 --- a/typedapi/security/createapikey/response.go +++ b/typedapi/security/createapikey/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package createapikey // Response holds the response body struct for the package createapikey // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/create_api_key/SecurityCreateApiKeyResponse.ts#L23-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/create_api_key/SecurityCreateApiKeyResponse.ts#L23-L50 type Response struct { // ApiKey Generated API key. diff --git a/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go b/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go index adcfad92f4..9332efb37c 100644 --- a/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go +++ b/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go @@ -16,21 +16,53 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a cross-cluster API key for API key based remote cluster access. +// Create a cross-cluster API key. +// +// Create an API key of the `cross_cluster` type for the API key based remote +// cluster access. +// A `cross_cluster` API key cannot be used to authenticate through the REST +// interface. +// +// IMPORTANT: To authenticate this request you must use a credential that is not +// an API key. Even if you use an API key that has the required privilege, the +// API returns an error. +// +// Cross-cluster API keys are created by the Elasticsearch API key service, +// which is automatically enabled. +// +// NOTE: Unlike REST API keys, a cross-cluster API key does not capture +// permissions of the authenticated user. The API key’s effective permission is +// exactly as specified with the `access` property. +// +// A successful request returns a JSON structure that contains the API key, its +// unique ID, and its name. If applicable, it also returns expiration +// information for the API key in milliseconds. +// +// By default, API keys never expire. You can specify expiration information +// when you create the API keys. +// +// Cross-cluster API keys can only be updated with the update cross-cluster API +// key API. +// Attempting to update them with the update REST API key API or the bulk update +// REST API keys API will result in an error. package createcrossclusterapikey import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. 
@@ -45,6 +77,10 @@ type CreateCrossClusterApiKey struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -65,7 +101,35 @@ func NewCreateCrossClusterApiKeyFunc(tp elastictransport.Interface) NewCreateCro } } -// Creates a cross-cluster API key for API key based remote cluster access. +// Create a cross-cluster API key. +// +// Create an API key of the `cross_cluster` type for the API key based remote +// cluster access. +// A `cross_cluster` API key cannot be used to authenticate through the REST +// interface. +// +// IMPORTANT: To authenticate this request you must use a credential that is not +// an API key. Even if you use an API key that has the required privilege, the +// API returns an error. +// +// Cross-cluster API keys are created by the Elasticsearch API key service, +// which is automatically enabled. +// +// NOTE: Unlike REST API keys, a cross-cluster API key does not capture +// permissions of the authenticated user. The API key’s effective permission is +// exactly as specified with the `access` property. +// +// A successful request returns a JSON structure that contains the API key, its +// unique ID, and its name. If applicable, it also returns expiration +// information for the API key in milliseconds. +// +// By default, API keys never expire. You can specify expiration information +// when you create the API keys. +// +// Cross-cluster API keys can only be updated with the update cross-cluster API +// key API. +// Attempting to update them with the update REST API key API or the bulk update +// REST API keys API will result in an error. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-cross-cluster-api-key.html func New(tp elastictransport.Interface) *CreateCrossClusterApiKey { @@ -73,6 +137,8 @@ func New(tp elastictransport.Interface) *CreateCrossClusterApiKey { transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -84,6 +150,21 @@ func New(tp elastictransport.Interface) *CreateCrossClusterApiKey { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *CreateCrossClusterApiKey) Raw(raw io.Reader) *CreateCrossClusterApiKey { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *CreateCrossClusterApiKey) Request(req *Request) *CreateCrossClusterApiKey { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. 
func (r *CreateCrossClusterApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -93,6 +174,31 @@ func (r *CreateCrossClusterApiKey) HttpRequest(ctx context.Context) (*http.Reque var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for CreateCrossClusterApiKey: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -182,13 +288,7 @@ func (r CreateCrossClusterApiKey) Perform(providedCtx context.Context) (*http.Re } // Do runs the request through the transport, handle the response and returns a createcrossclusterapikey.Response -func (r CreateCrossClusterApiKey) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r CreateCrossClusterApiKey) IsSuccess(providedCtx context.Context) (bool, error) { +func (r CreateCrossClusterApiKey) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -199,30 +299,46 @@ func (r CreateCrossClusterApiKey) IsSuccess(providedCtx context.Context) (bool, ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the CreateCrossClusterApiKey query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode } - return false, nil + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the CreateCrossClusterApiKey headers map. 
@@ -231,3 +347,111 @@ func (r *CreateCrossClusterApiKey) Header(key, value string) *CreateCrossCluster return r } + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *CreateCrossClusterApiKey) ErrorTrace(errortrace bool) *CreateCrossClusterApiKey { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *CreateCrossClusterApiKey) FilterPath(filterpaths ...string) *CreateCrossClusterApiKey { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *CreateCrossClusterApiKey) Human(human bool) *CreateCrossClusterApiKey { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *CreateCrossClusterApiKey) Pretty(pretty bool) *CreateCrossClusterApiKey { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The access to be granted to this API key. +// The access is composed of permissions for cross-cluster search and +// cross-cluster replication. +// At least one of them must be specified. +// +// NOTE: No explicit privileges should be specified for either search or +// replication access.
+// The creation process automatically converts the access specification to a +// role descriptor which has relevant privileges assigned accordingly. +// API name: access +func (r *CreateCrossClusterApiKey) Access(access types.AccessVariant) *CreateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Access = *access.AccessCaster() + + return r +} + +// Expiration time for the API key. +// By default, API keys never expire. +// API name: expiration +func (r *CreateCrossClusterApiKey) Expiration(duration types.DurationVariant) *CreateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() + + return r +} + +// Arbitrary metadata that you want to associate with the API key. +// It supports nested data structure. +// Within the metadata object, keys beginning with `_` are reserved for system +// usage. +// API name: metadata +func (r *CreateCrossClusterApiKey) Metadata(metadata types.MetadataVariant) *CreateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// Specifies the name for this API key. +// API name: name +func (r *CreateCrossClusterApiKey) Name(name string) *CreateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = name + + return r +} diff --git a/typedapi/security/createcrossclusterapikey/request.go b/typedapi/security/createcrossclusterapikey/request.go new file mode 100644 index 0000000000..65e755a51e --- /dev/null +++ b/typedapi/security/createcrossclusterapikey/request.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package createcrossclusterapikey + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package createcrossclusterapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/create_cross_cluster_api_key/CreateCrossClusterApiKeyRequest.ts#L25-L80 +type Request struct { + + // Access The access to be granted to this API key. + // The access is composed of permissions for cross-cluster search and + // cross-cluster replication. + // At least one of them must be specified. + // + // NOTE: No explicit privileges should be specified for either search or + // replication access. + // The creation process automatically converts the access specification to a + // role descriptor which has relevant privileges assigned accordingly. + Access types.Access `json:"access"` + // Expiration Expiration time for the API key. + // By default, API keys never expire. 
+ Expiration types.Duration `json:"expiration,omitempty"` + // Metadata Arbitrary metadata that you want to associate with the API key. + // It supports nested data structure. + // Within the metadata object, keys beginning with `_` are reserved for system + // usage. + Metadata types.Metadata `json:"metadata,omitempty"` + // Name Specifies the name for this API key. + Name string `json:"name"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Createcrossclusterapikey request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "access": + if err := dec.Decode(&s.Access); err != nil { + return fmt.Errorf("%s | %w", "Access", err) + } + + case "expiration": + if err := dec.Decode(&s.Expiration); err != nil { + return fmt.Errorf("%s | %w", "Expiration", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} diff --git a/typedapi/security/createcrossclusterapikey/response.go b/typedapi/security/createcrossclusterapikey/response.go new file mode 100644 index 0000000000..6c89c0ac46 --- /dev/null +++ b/typedapi/security/createcrossclusterapikey/response.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package createcrossclusterapikey + +// Response holds the response body struct for the package createcrossclusterapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/create_cross_cluster_api_key/CreateCrossClusterApiKeyResponse.ts#L23-L48 +type Response struct { + + // ApiKey Generated API key. + ApiKey string `json:"api_key"` + // Encoded API key credentials which is the base64-encoding of + // the UTF-8 representation of `id` and `api_key` joined + // by a colon (`:`). + Encoded string `json:"encoded"` + // Expiration Expiration in milliseconds for the API key. + Expiration *int64 `json:"expiration,omitempty"` + // Id Unique ID for this API key. + Id string `json:"id"` + // Name Specifies the name for this API key. 
+ Name string `json:"name"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/createservicetoken/create_service_token.go b/typedapi/security/createservicetoken/create_service_token.go index c428b723e6..05f05a8787 100644 --- a/typedapi/security/createservicetoken/create_service_token.go +++ b/typedapi/security/createservicetoken/create_service_token.go @@ -16,10 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a service accounts token for access without requiring basic +// Create a service account token. +// +// Create a service accounts token for access without requiring basic // authentication. +// +// NOTE: Service account tokens never expire. +// You must actively delete them if they are no longer needed. package createservicetoken import ( @@ -86,9 +91,14 @@ func NewCreateServiceTokenFunc(tp elastictransport.Interface) NewCreateServiceTo } } -// Creates a service accounts token for access without requiring basic +// Create a service account token. +// +// Create a service accounts token for access without requiring basic // authentication. // +// NOTE: Service account tokens never expire. +// You must actively delete them if they are no longer needed. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-service-token.html func New(tp elastictransport.Interface) *CreateServiceToken { r := &CreateServiceToken{ @@ -340,7 +350,7 @@ func (r *CreateServiceToken) Header(key, value string) *CreateServiceToken { return r } -// Namespace An identifier for the namespace +// Namespace The name of the namespace, which is a top-level grouping of service accounts. 
// API Name: namespace func (r *CreateServiceToken) _namespace(namespace string) *CreateServiceToken { r.paramSet |= namespaceMask @@ -349,7 +359,7 @@ func (r *CreateServiceToken) _namespace(namespace string) *CreateServiceToken { return r } -// Service An identifier for the service name +// Service The name of the service. // API Name: service func (r *CreateServiceToken) _service(service string) *CreateServiceToken { r.paramSet |= serviceMask @@ -358,7 +368,18 @@ func (r *CreateServiceToken) _service(service string) *CreateServiceToken { return r } -// Name An identifier for the token name +// Name The name for the service account token. +// If omitted, a random name will be generated. +// +// Token names must be at least one and no more than 256 characters. +// They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and +// underscores (`_`), but cannot begin with an underscore. +// +// NOTE: Token names must be unique in the context of the associated service +// account. +// They must also be globally unique with their fully qualified names, which are +// comprised of the service account principal and token name, such as +// `//`. // API Name: name func (r *CreateServiceToken) Name(name string) *CreateServiceToken { r.paramSet |= nameMask diff --git a/typedapi/security/createservicetoken/response.go b/typedapi/security/createservicetoken/response.go index b8bc7e90d1..99df85943f 100644 --- a/typedapi/security/createservicetoken/response.go +++ b/typedapi/security/createservicetoken/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package createservicetoken @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package createservicetoken // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/create_service_token/CreateServiceTokenResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/create_service_token/CreateServiceTokenResponse.ts#L22-L30 type Response struct { Created bool `json:"created"` Token types.ServiceToken `json:"token"` diff --git a/typedapi/security/delegatepki/delegate_pki.go b/typedapi/security/delegatepki/delegate_pki.go new file mode 100644 index 0000000000..f684c46cc4 --- /dev/null +++ b/typedapi/security/delegatepki/delegate_pki.go @@ -0,0 +1,391 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Delegate PKI authentication. +// +// This API implements the exchange of an X509Certificate chain for an +// Elasticsearch access token. +// The certificate chain is validated, according to RFC 5280, by sequentially +// considering the trust configuration of every installed PKI realm that has +// `delegation.enabled` set to `true`. +// A successfully trusted client certificate is also subject to the validation +// of the subject distinguished name according to the `username_pattern` of the +// respective realm. +// +// This API is called by smart and trusted proxies, such as Kibana, which +// terminate the user's TLS session but still want to authenticate the user by +// using a PKI realm—-​as if the user connected directly to Elasticsearch. +// +// IMPORTANT: The association between the subject public key in the target +// certificate and the corresponding private key is not validated. +// This is part of the TLS authentication process and it is delegated to the +// proxy that calls this API. +// The proxy is trusted to have performed the TLS authentication and this API +// translates that authentication into an Elasticsearch access token. +package delegatepki + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DelegatePki struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDelegatePki type alias for index. +type NewDelegatePki func() *DelegatePki + +// NewDelegatePkiFunc returns a new instance of DelegatePki with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDelegatePkiFunc(tp elastictransport.Interface) NewDelegatePki { + return func() *DelegatePki { + n := New(tp) + + return n + } +} + +// Delegate PKI authentication. +// +// This API implements the exchange of an X509Certificate chain for an +// Elasticsearch access token. +// The certificate chain is validated, according to RFC 5280, by sequentially +// considering the trust configuration of every installed PKI realm that has +// `delegation.enabled` set to `true`. +// A successfully trusted client certificate is also subject to the validation +// of the subject distinguished name according to the `username_pattern` of the +// respective realm. +// +// This API is called by smart and trusted proxies, such as Kibana, which +// terminate the user's TLS session but still want to authenticate the user by +// using a PKI realm—-​as if the user connected directly to Elasticsearch. +// +// IMPORTANT: The association between the subject public key in the target +// certificate and the corresponding private key is not validated. +// This is part of the TLS authentication process and it is delegated to the +// proxy that calls this API. +// The proxy is trusted to have performed the TLS authentication and this API +// translates that authentication into an Elasticsearch access token.
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delegate-pki-authentication.html +func New(tp elastictransport.Interface) *DelegatePki { + r := &DelegatePki{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *DelegatePki) Raw(raw io.Reader) *DelegatePki { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *DelegatePki) Request(req *Request) *DelegatePki { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DelegatePki) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for DelegatePki: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("delegate_pki") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), 
r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DelegatePki) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.delegate_pki") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.delegate_pki") + if reader := instrument.RecordRequestBody(ctx, "security.delegate_pki", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delegate_pki") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DelegatePki query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a delegatepki.Response +func (r DelegatePki) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delegate_pki") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the DelegatePki headers map. +func (r *DelegatePki) Header(key, value string) *DelegatePki { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DelegatePki) ErrorTrace(errortrace bool) *DelegatePki { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *DelegatePki) FilterPath(filterpaths ...string) *DelegatePki { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DelegatePki) Human(human bool) *DelegatePki { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DelegatePki) Pretty(pretty bool) *DelegatePki { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The X509Certificate chain, which is represented as an ordered string array. +// Each string in the array is a base64-encoded (Section 4 of RFC4648 - not +// base64url-encoded) of the certificate's DER encoding. +// +// The first element is the target certificate that contains the subject +// distinguished name that is requesting access. +// This may be followed by additional certificates; each subsequent certificate +// is used to certify the previous one.
+// API name: x509_certificate_chain +func (r *DelegatePki) X509CertificateChain(x509certificatechains ...string) *DelegatePki { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range x509certificatechains { + + r.req.X509CertificateChain = append(r.req.X509CertificateChain, v) + + } + return r +} diff --git a/typedapi/security/delegatepki/request.go b/typedapi/security/delegatepki/request.go new file mode 100644 index 0000000000..ce6bd43a1c --- /dev/null +++ b/typedapi/security/delegatepki/request.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package delegatepki + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package delegatepki +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/delegate_pki/SecurityDelegatePkiRequest.ts#L22-L57 +type Request struct { + + // X509CertificateChain The X509Certificate chain, which is represented as an ordered string array. + // Each string in the array is a base64-encoded (Section 4 of RFC4648 - not + // base64url-encoded) of the certificate's DER encoding. + // + // The first element is the target certificate that contains the subject + // distinguished name that is requesting access. + // This may be followed by additional certificates; each subsequent certificate + // is used to certify the previous one. + X509CertificateChain []string `json:"x509_certificate_chain"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Delegatepki request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/security/delegatepki/response.go b/typedapi/security/delegatepki/response.go new file mode 100644 index 0000000000..22df9a2af2 --- /dev/null +++ b/typedapi/security/delegatepki/response.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package delegatepki + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package delegatepki +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/delegate_pki/SecurityDelegatePkiResponse.ts#L24-L41 +type Response struct { + + // AccessToken An access token associated with the subject distinguished name of the + // client's certificate. + AccessToken string `json:"access_token"` + Authentication *types.Authentication `json:"authentication,omitempty"` + // ExpiresIn The amount of time (in seconds) before the token expires. + ExpiresIn int64 `json:"expires_in"` + // Type The type of token. + Type string `json:"type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/deleteprivileges/delete_privileges.go b/typedapi/security/deleteprivileges/delete_privileges.go index 7ea4de040c..bb817b16e3 100644 --- a/typedapi/security/deleteprivileges/delete_privileges.go +++ b/typedapi/security/deleteprivileges/delete_privileges.go @@ -16,9 +16,16 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Removes application privileges. +// Delete application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_security` cluster privilege (or a greater privilege such as +// `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. package deleteprivileges import ( @@ -82,7 +89,14 @@ func NewDeletePrivilegesFunc(tp elastictransport.Interface) NewDeletePrivileges } } -// Removes application privileges. +// Delete application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_security` cluster privilege (or a greater privilege such as +// `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-privilege.html func New(tp elastictransport.Interface) *DeletePrivileges { @@ -302,7 +316,8 @@ func (r *DeletePrivileges) Header(key, value string) *DeletePrivileges { return r } -// Application Application name +// Application The name of the application. +// Application privileges are always associated with exactly one application. // API Name: application func (r *DeletePrivileges) _application(application string) *DeletePrivileges { r.paramSet |= applicationMask @@ -311,7 +326,7 @@ func (r *DeletePrivileges) _application(application string) *DeletePrivileges { return r } -// Name Privilege name +// Name The name of the privilege. 
// API Name: name func (r *DeletePrivileges) _name(name string) *DeletePrivileges { r.paramSet |= nameMask diff --git a/typedapi/security/deleteprivileges/response.go b/typedapi/security/deleteprivileges/response.go index c910f3c0ea..674eb91481 100644 --- a/typedapi/security/deleteprivileges/response.go +++ b/typedapi/security/deleteprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleteprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package deleteprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/delete_privileges/SecurityDeletePrivilegesResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/delete_privileges/SecurityDeletePrivilegesResponse.ts#L23-L26 type Response map[string]map[string]types.FoundStatus diff --git a/typedapi/security/deleterole/delete_role.go b/typedapi/security/deleterole/delete_role.go index 3c056c8109..e04b9fe375 100644 --- a/typedapi/security/deleterole/delete_role.go +++ b/typedapi/security/deleterole/delete_role.go @@ -16,9 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Removes roles in the native realm. +// Delete roles. +// +// Delete roles in the native realm. 
+// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The delete roles API cannot remove roles that are defined in roles files. package deleterole import ( @@ -77,7 +82,12 @@ func NewDeleteRoleFunc(tp elastictransport.Interface) NewDeleteRole { } } -// Removes roles in the native realm. +// Delete roles. +// +// Delete roles in the native realm. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The delete roles API cannot remove roles that are defined in roles files. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role.html func New(tp elastictransport.Interface) *DeleteRole { @@ -291,7 +301,7 @@ func (r *DeleteRole) Header(key, value string) *DeleteRole { return r } -// Name Role name +// Name The name of the role. // API Name: name func (r *DeleteRole) _name(name string) *DeleteRole { r.paramSet |= nameMask diff --git a/typedapi/security/deleterole/response.go b/typedapi/security/deleterole/response.go index bef7f1a376..8e48a5ff28 100644 --- a/typedapi/security/deleterole/response.go +++ b/typedapi/security/deleterole/response.go @@ -16,14 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleterole // Response holds the response body struct for the package deleterole // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/delete_role/SecurityDeleteRoleResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/delete_role/SecurityDeleteRoleResponse.ts#L20-L28 type Response struct { + + // Found If the role is successfully deleted, `found` is `true`. + // Otherwise, `found` is `false`. Found bool `json:"found"` } diff --git a/typedapi/security/deleterolemapping/delete_role_mapping.go b/typedapi/security/deleterolemapping/delete_role_mapping.go index 661155cdb3..dac76d26c0 100644 --- a/typedapi/security/deleterolemapping/delete_role_mapping.go +++ b/typedapi/security/deleterolemapping/delete_role_mapping.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Removes role mappings. +// Delete role mappings. +// +// Role mappings define which roles are assigned to each user. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. +// The delete role mappings API cannot remove role mappings that are defined in +// role mapping files. package deleterolemapping import ( @@ -77,7 +83,13 @@ func NewDeleteRoleMappingFunc(tp elastictransport.Interface) NewDeleteRoleMappin } } -// Removes role mappings. +// Delete role mappings. 
+// +// Role mappings define which roles are assigned to each user. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. +// The delete role mappings API cannot remove role mappings that are defined in +// role mapping files. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role-mapping.html func New(tp elastictransport.Interface) *DeleteRoleMapping { @@ -291,7 +303,9 @@ func (r *DeleteRoleMapping) Header(key, value string) *DeleteRoleMapping { return r } -// Name Role-mapping name +// Name The distinct name that identifies the role mapping. +// The name is used solely as an identifier to facilitate interaction via the +// API; it does not affect the behavior of the mapping in any way. // API Name: name func (r *DeleteRoleMapping) _name(name string) *DeleteRoleMapping { r.paramSet |= nameMask diff --git a/typedapi/security/deleterolemapping/response.go b/typedapi/security/deleterolemapping/response.go index cda78caa60..35796a0530 100644 --- a/typedapi/security/deleterolemapping/response.go +++ b/typedapi/security/deleterolemapping/response.go @@ -16,14 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleterolemapping // Response holds the response body struct for the package deleterolemapping // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/delete_role_mapping/SecurityDeleteRoleMappingResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/delete_role_mapping/SecurityDeleteRoleMappingResponse.ts#L20-L28 type Response struct { + + // Found If the mapping is successfully deleted, `found` is `true`. + // Otherwise, `found` is `false`. Found bool `json:"found"` } diff --git a/typedapi/security/deleteservicetoken/delete_service_token.go b/typedapi/security/deleteservicetoken/delete_service_token.go index 9b5d021caf..0363e772ff 100644 --- a/typedapi/security/deleteservicetoken/delete_service_token.go +++ b/typedapi/security/deleteservicetoken/delete_service_token.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes a service account token. +// Delete service account tokens. +// +// Delete service account tokens for a service in a specified namespace. package deleteservicetoken import ( @@ -87,7 +89,9 @@ func NewDeleteServiceTokenFunc(tp elastictransport.Interface) NewDeleteServiceTo } } -// Deletes a service account token. +// Delete service account tokens. +// +// Delete service account tokens for a service in a specified namespace. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-service-token.html func New(tp elastictransport.Interface) *DeleteServiceToken { @@ -317,7 +321,7 @@ func (r *DeleteServiceToken) Header(key, value string) *DeleteServiceToken { return r } -// Namespace An identifier for the namespace +// Namespace The namespace, which is a top-level grouping of service accounts. // API Name: namespace func (r *DeleteServiceToken) _namespace(namespace string) *DeleteServiceToken { r.paramSet |= namespaceMask @@ -326,7 +330,7 @@ func (r *DeleteServiceToken) _namespace(namespace string) *DeleteServiceToken { return r } -// Service An identifier for the service name +// Service The service name. // API Name: service func (r *DeleteServiceToken) _service(service string) *DeleteServiceToken { r.paramSet |= serviceMask @@ -335,7 +339,7 @@ func (r *DeleteServiceToken) _service(service string) *DeleteServiceToken { return r } -// Name An identifier for the token name +// Name The name of the service account token. // API Name: name func (r *DeleteServiceToken) _name(name string) *DeleteServiceToken { r.paramSet |= nameMask diff --git a/typedapi/security/deleteservicetoken/response.go b/typedapi/security/deleteservicetoken/response.go index bbc601c514..9622b9525f 100644 --- a/typedapi/security/deleteservicetoken/response.go +++ b/typedapi/security/deleteservicetoken/response.go @@ -16,14 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleteservicetoken // Response holds the response body struct for the package deleteservicetoken // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/delete_service_token/DeleteServiceTokenResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/delete_service_token/DeleteServiceTokenResponse.ts#L20-L28 type Response struct { + + // Found If the service account token is successfully deleted, the request returns + // `{"found": true}`. + // Otherwise, the response will have status code 404 and `found` is set to + // `false`. Found bool `json:"found"` } diff --git a/typedapi/security/deleteuser/delete_user.go b/typedapi/security/deleteuser/delete_user.go index 74646c1cdd..9a93925adb 100644 --- a/typedapi/security/deleteuser/delete_user.go +++ b/typedapi/security/deleteuser/delete_user.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes users from the native realm. +// Delete users. +// +// Delete users from the native realm. package deleteuser import ( @@ -77,7 +79,9 @@ func NewDeleteUserFunc(tp elastictransport.Interface) NewDeleteUser { } } -// Deletes users from the native realm. +// Delete users. +// +// Delete users from the native realm. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html func New(tp elastictransport.Interface) *DeleteUser { @@ -291,7 +295,7 @@ func (r *DeleteUser) Header(key, value string) *DeleteUser { return r } -// Username username +// Username An identifier for the user. // API Name: username func (r *DeleteUser) _username(username string) *DeleteUser { r.paramSet |= usernameMask diff --git a/typedapi/security/deleteuser/response.go b/typedapi/security/deleteuser/response.go index 7a3f83978d..eb09ded718 100644 --- a/typedapi/security/deleteuser/response.go +++ b/typedapi/security/deleteuser/response.go @@ -16,14 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleteuser // Response holds the response body struct for the package deleteuser // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/delete_user/SecurityDeleteUserResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/delete_user/SecurityDeleteUserResponse.ts#L20-L28 type Response struct { + + // Found If the user is successfully deleted, the request returns `{"found": true}`. + // Otherwise, `found` is set to `false`. Found bool `json:"found"` } diff --git a/typedapi/security/disableuser/disable_user.go b/typedapi/security/disableuser/disable_user.go index 2a39d83720..0c15b57dde 100644 --- a/typedapi/security/disableuser/disable_user.go +++ b/typedapi/security/disableuser/disable_user.go @@ -16,9 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Disables users in the native realm. +// Disable users. +// +// Disable users in the native realm. +// By default, when you create users, they are enabled. +// You can use this API to revoke a user's access to Elasticsearch. package disableuser import ( @@ -77,7 +81,11 @@ func NewDisableUserFunc(tp elastictransport.Interface) NewDisableUser { } } -// Disables users in the native realm. +// Disable users. +// +// Disable users in the native realm. +// By default, when you create users, they are enabled. +// You can use this API to revoke a user's access to Elasticsearch. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html func New(tp elastictransport.Interface) *DisableUser { @@ -293,7 +301,7 @@ func (r *DisableUser) Header(key, value string) *DisableUser { return r } -// Username The username of the user to disable +// Username An identifier for the user. // API Name: username func (r *DisableUser) _username(username string) *DisableUser { r.paramSet |= usernameMask diff --git a/typedapi/security/disableuser/response.go b/typedapi/security/disableuser/response.go index 60f3d741d7..f4797ec4f2 100644 --- a/typedapi/security/disableuser/response.go +++ b/typedapi/security/disableuser/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package disableuser // Response holds the response body struct for the package disableuser // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/disable_user/SecurityDisableUserResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/disable_user/SecurityDisableUserResponse.ts#L20-L22 type Response struct { } diff --git a/typedapi/security/disableuserprofile/disable_user_profile.go b/typedapi/security/disableuserprofile/disable_user_profile.go index 71933a19c3..ff36cd560c 100644 --- a/typedapi/security/disableuserprofile/disable_user_profile.go +++ b/typedapi/security/disableuserprofile/disable_user_profile.go @@ -16,9 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Disables a user profile so it's not visible in user profile searches. +// Disable a user profile. +// +// Disable user profiles so that they are not visible in user profile searches. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// When you activate a user profile, its automatically enabled and visible in +// user profile searches. 
You can use the disable user profile API to disable a
+// user profile so it’s not visible in these searches.
+// To re-enable a disabled user profile, use the enable user profile API.
 package disableuserprofile
 
 import (
@@ -77,7 +90,20 @@ func NewDisableUserProfileFunc(tp elastictransport.Interface) NewDisableUserProf
 	}
 }
 
-// Disables a user profile so it's not visible in user profile searches.
+// Disable a user profile.
+//
+// Disable user profiles so that they are not visible in user profile searches.
+//
+// NOTE: The user profile feature is designed only for use by Kibana and
+// Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+// Individual users and external applications should not call this API directly.
+// Elastic reserves the right to change or remove this feature in future
+// releases without prior notice.
+//
+// When you activate a user profile, it's automatically enabled and visible in
+// user profile searches. You can use the disable user profile API to disable a
+// user profile so it’s not visible in these searches.
+// To re-enable a disabled user profile, use the enable user profile API.
 //
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user-profile.html
 func New(tp elastictransport.Interface) *DisableUserProfile {
@@ -303,9 +329,10 @@ func (r *DisableUserProfile) _uid(uid string) *DisableUserProfile {
 }
 
 // Refresh If 'true', Elasticsearch refreshes the affected shards to make this operation
-// visible to search, if 'wait_for' then wait for a refresh to make this
-// operation
-// visible to search, if 'false' do nothing with refreshes.
+// visible to search.
+// If 'wait_for', it waits for a refresh to make this operation visible to
+// search.
+// If 'false', it does nothing with refreshes.
// API name: refresh func (r *DisableUserProfile) Refresh(refresh refresh.Refresh) *DisableUserProfile { r.values.Set("refresh", refresh.String()) diff --git a/typedapi/security/disableuserprofile/response.go b/typedapi/security/disableuserprofile/response.go index b69ccd9bb9..36e72540f0 100644 --- a/typedapi/security/disableuserprofile/response.go +++ b/typedapi/security/disableuserprofile/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package disableuserprofile // Response holds the response body struct for the package disableuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/disable_user_profile/Response.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/disable_user_profile/Response.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/security/enableuser/enable_user.go b/typedapi/security/enableuser/enable_user.go index 2a89acaccd..0c6e533fe7 100644 --- a/typedapi/security/enableuser/enable_user.go +++ b/typedapi/security/enableuser/enable_user.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Enables users in the native realm. +// Enable users. +// +// Enable users in the native realm. +// By default, when you create users, they are enabled. 
package enableuser import ( @@ -77,7 +80,10 @@ func NewEnableUserFunc(tp elastictransport.Interface) NewEnableUser { } } -// Enables users in the native realm. +// Enable users. +// +// Enable users in the native realm. +// By default, when you create users, they are enabled. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user.html func New(tp elastictransport.Interface) *EnableUser { @@ -293,7 +299,7 @@ func (r *EnableUser) Header(key, value string) *EnableUser { return r } -// Username The username of the user to enable +// Username An identifier for the user. // API Name: username func (r *EnableUser) _username(username string) *EnableUser { r.paramSet |= usernameMask diff --git a/typedapi/security/enableuser/response.go b/typedapi/security/enableuser/response.go index 64dde59ea4..fd7c818e00 100644 --- a/typedapi/security/enableuser/response.go +++ b/typedapi/security/enableuser/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package enableuser // Response holds the response body struct for the package enableuser // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/enable_user/SecurityEnableUserResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/enable_user/SecurityEnableUserResponse.ts#L20-L22 type Response struct { } diff --git a/typedapi/security/enableuserprofile/enable_user_profile.go b/typedapi/security/enableuserprofile/enable_user_profile.go index c832969880..f2797db47a 100644 --- a/typedapi/security/enableuserprofile/enable_user_profile.go +++ b/typedapi/security/enableuserprofile/enable_user_profile.go @@ -16,9 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Enables a user profile so it's visible in user profile searches. +// Enable a user profile. +// +// Enable user profiles to make them visible in user profile searches. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// When you activate a user profile, it's automatically enabled and visible in +// user profile searches. 
+// If you later disable the user profile, you can use the enable user profile +// API to make the profile visible in these searches again. package enableuserprofile import ( @@ -77,7 +90,20 @@ func NewEnableUserProfileFunc(tp elastictransport.Interface) NewEnableUserProfil } } -// Enables a user profile so it's visible in user profile searches. +// Enable a user profile. +// +// Enable user profiles to make them visible in user profile searches. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// When you activate a user profile, it's automatically enabled and visible in +// user profile searches. +// If you later disable the user profile, you can use the enable user profile +// API to make the profile visible in these searches again. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user-profile.html func New(tp elastictransport.Interface) *EnableUserProfile { @@ -293,7 +319,7 @@ func (r *EnableUserProfile) Header(key, value string) *EnableUserProfile { return r } -// Uid Unique identifier for the user profile. +// Uid A unique identifier for the user profile. // API Name: uid func (r *EnableUserProfile) _uid(uid string) *EnableUserProfile { r.paramSet |= uidMask @@ -303,9 +329,10 @@ func (r *EnableUserProfile) _uid(uid string) *EnableUserProfile { } // Refresh If 'true', Elasticsearch refreshes the affected shards to make this operation -// visible to search, if 'wait_for' then wait for a refresh to make this -// operation -// visible to search, if 'false' do nothing with refreshes. +// visible to search. +// If 'wait_for', it waits for a refresh to make this operation visible to +// search. 
+// If 'false', nothing is done with refreshes. // API name: refresh func (r *EnableUserProfile) Refresh(refresh refresh.Refresh) *EnableUserProfile { r.values.Set("refresh", refresh.String()) diff --git a/typedapi/security/enableuserprofile/response.go b/typedapi/security/enableuserprofile/response.go index 4daf7ec4ba..0e9d924cfa 100644 --- a/typedapi/security/enableuserprofile/response.go +++ b/typedapi/security/enableuserprofile/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package enableuserprofile // Response holds the response body struct for the package enableuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/enable_user_profile/Response.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/enable_user_profile/Response.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/security/enrollkibana/enroll_kibana.go b/typedapi/security/enrollkibana/enroll_kibana.go index 1e87c5e9b4..80cbcd5c23 100644 --- a/typedapi/security/enrollkibana/enroll_kibana.go +++ b/typedapi/security/enrollkibana/enroll_kibana.go @@ -16,10 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Enables a Kibana instance to configure itself for communication with a -// secured Elasticsearch cluster. +// Enroll Kibana. +// +// Enable a Kibana instance to configure itself for communication with a secured +// Elasticsearch cluster. +// +// NOTE: This API is currently intended for internal use only by Kibana. +// Kibana uses this API internally to configure itself for communications with +// an Elasticsearch cluster that already has security features enabled. package enrollkibana import ( @@ -69,8 +75,14 @@ func NewEnrollKibanaFunc(tp elastictransport.Interface) NewEnrollKibana { } } -// Enables a Kibana instance to configure itself for communication with a -// secured Elasticsearch cluster. +// Enroll Kibana. +// +// Enable a Kibana instance to configure itself for communication with a secured +// Elasticsearch cluster. +// +// NOTE: This API is currently intended for internal use only by Kibana. +// Kibana uses this API internally to configure itself for communications with +// an Elasticsearch cluster that already has security features enabled. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-kibana-enrollment.html func New(tp elastictransport.Interface) *EnrollKibana { diff --git a/typedapi/security/enrollkibana/response.go b/typedapi/security/enrollkibana/response.go index bf69554f10..3a77a62d8d 100644 --- a/typedapi/security/enrollkibana/response.go +++ b/typedapi/security/enrollkibana/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package enrollkibana @@ -26,8 +26,13 @@ import ( // Response holds the response body struct for the package enrollkibana // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/enroll_kibana/Response.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/enroll_kibana/Response.ts#L20-L29 type Response struct { + + // HttpCa The CA certificate used to sign the node certificates that Elasticsearch uses + // for TLS on the HTTP layer. + // The certificate is returned as a Base64 encoded string of the ASN.1 DER + // encoding of the certificate. HttpCa string `json:"http_ca"` Token types.KibanaToken `json:"token"` } diff --git a/typedapi/security/enrollnode/enroll_node.go b/typedapi/security/enrollnode/enroll_node.go index ac01cd3281..483bf089d5 100644 --- a/typedapi/security/enrollnode/enroll_node.go +++ b/typedapi/security/enrollnode/enroll_node.go @@ -16,9 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Allows a new node to join an existing cluster with security features enabled. +// Enroll a node. +// +// Enroll a new node to allow it to join an existing cluster with security +// features enabled. +// +// The response contains all the necessary information for the joining node to +// bootstrap discovery and security related settings so that it can successfully +// join the cluster. 
+// The response contains key and certificate material that allows the caller to +// generate valid signed certificates for the HTTP layer of all nodes in the +// cluster. package enrollnode import ( @@ -68,7 +78,17 @@ func NewEnrollNodeFunc(tp elastictransport.Interface) NewEnrollNode { } } -// Allows a new node to join an existing cluster with security features enabled. +// Enroll a node. +// +// Enroll a new node to allow it to join an existing cluster with security +// features enabled. +// +// The response contains all the necessary information for the joining node to +// bootstrap discovery and security related settings so that it can successfully +// join the cluster. +// The response contains key and certificate material that allows the caller to +// generate valid signed certificates for the HTTP layer of all nodes in the +// cluster. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-node-enrollment.html func New(tp elastictransport.Interface) *EnrollNode { diff --git a/typedapi/security/enrollnode/response.go b/typedapi/security/enrollnode/response.go index e4295ef16c..5f8e1952d2 100644 --- a/typedapi/security/enrollnode/response.go +++ b/typedapi/security/enrollnode/response.go @@ -16,20 +16,36 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package enrollnode // Response holds the response body struct for the package enrollnode // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/enroll_node/Response.ts#L20-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/enroll_node/Response.ts#L20-L47 type Response struct { - HttpCaCert string `json:"http_ca_cert"` - HttpCaKey string `json:"http_ca_key"` - NodesAddresses []string `json:"nodes_addresses"` - TransportCaCert string `json:"transport_ca_cert"` - TransportCert string `json:"transport_cert"` - TransportKey string `json:"transport_key"` + + // HttpCaCert The CA certificate that can be used by the new node in order to sign its + // certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER + // encoding of the certificate. + HttpCaCert string `json:"http_ca_cert"` + // HttpCaKey The CA private key that can be used by the new node in order to sign its + // certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER + // encoding of the key. + HttpCaKey string `json:"http_ca_key"` + // NodesAddresses A list of transport addresses in the form of `host:port` for the nodes that + // are already members of the cluster. + NodesAddresses []string `json:"nodes_addresses"` + // TransportCaCert The CA certificate that is used to sign the TLS certificate for the transport + // layer, as a Base64 encoded string of the ASN.1 DER encoding of the + // certificate. 
+ TransportCaCert string `json:"transport_ca_cert"` + // TransportCert The certificate that the node can use for TLS for its transport layer, as a + // Base64 encoded string of the ASN.1 DER encoding of the certificate. + TransportCert string `json:"transport_cert"` + // TransportKey The private key that the node can use for TLS for its transport layer, as a + // Base64 encoded string of the ASN.1 DER encoding of the key. + TransportKey string `json:"transport_key"` } // NewResponse returns a Response diff --git a/typedapi/security/getapikey/get_api_key.go b/typedapi/security/getapikey/get_api_key.go index 1ce96a3a01..59076a1792 100644 --- a/typedapi/security/getapikey/get_api_key.go +++ b/typedapi/security/getapikey/get_api_key.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get API key information. +// // Retrieves information for one or more API keys. // NOTE: If you have only the `manage_own_api_key` privilege, this API returns // only the API keys that you own. @@ -75,6 +76,7 @@ func NewGetApiKeyFunc(tp elastictransport.Interface) NewGetApiKey { } // Get API key information. +// // Retrieves information for one or more API keys. // NOTE: If you have only the `manage_own_api_key` privilege, this API returns // only the API keys that you own. diff --git a/typedapi/security/getapikey/response.go b/typedapi/security/getapikey/response.go index 2608342aa3..6e877728bb 100644 --- a/typedapi/security/getapikey/response.go +++ b/typedapi/security/getapikey/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getapikey @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getapikey // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_api_key/SecurityGetApiKeyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_api_key/SecurityGetApiKeyResponse.ts#L22-L24 type Response struct { ApiKeys []types.ApiKey `json:"api_keys"` } diff --git a/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go b/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go index e8fae1bf77..376ac44e77 100644 --- a/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go +++ b/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go @@ -16,10 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves the list of cluster privileges and index privileges that are -// available in this version of Elasticsearch. +// Get builtin privileges. +// +// Get the list of cluster privileges and index privileges that are available in +// this version of Elasticsearch. package getbuiltinprivileges import ( @@ -69,8 +71,10 @@ func NewGetBuiltinPrivilegesFunc(tp elastictransport.Interface) NewGetBuiltinPri } } -// Retrieves the list of cluster privileges and index privileges that are -// available in this version of Elasticsearch. +// Get builtin privileges. 
+// +// Get the list of cluster privileges and index privileges that are available in +// this version of Elasticsearch. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-builtin-privileges.html func New(tp elastictransport.Interface) *GetBuiltinPrivileges { diff --git a/typedapi/security/getbuiltinprivileges/response.go b/typedapi/security/getbuiltinprivileges/response.go index 140842344d..fb9a24f858 100644 --- a/typedapi/security/getbuiltinprivileges/response.go +++ b/typedapi/security/getbuiltinprivileges/response.go @@ -16,24 +16,29 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getbuiltinprivileges import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterprivilege" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/remoteclusterprivilege" ) // Response holds the response body struct for the package getbuiltinprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_builtin_privileges/SecurityGetBuiltinPrivilegesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_builtin_privileges/SecurityGetBuiltinPrivilegesResponse.ts#L26-L42 type Response struct { - Cluster []string `json:"cluster"` - Index []string `json:"index"` + + // Cluster The list of cluster privileges that are understood by this version of + // Elasticsearch. + Cluster []clusterprivilege.ClusterPrivilege `json:"cluster"` + // Index The list of index privileges that are understood by this version of + // Elasticsearch. 
+ Index []string `json:"index"` + // RemoteCluster The list of remote_cluster privileges that are understood by this version of + // Elasticsearch. + RemoteCluster []remoteclusterprivilege.RemoteClusterPrivilege `json:"remote_cluster"` } // NewResponse returns a Response @@ -41,43 +46,3 @@ func NewResponse() *Response { r := &Response{} return r } - -func (s *Response) UnmarshalJSON(data []byte) error { - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "cluster": - if err := dec.Decode(&s.Cluster); err != nil { - return fmt.Errorf("%s | %w", "Cluster", err) - } - - case "index": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(string) - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Index", err) - } - - s.Index = append(s.Index, *o) - } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Index); err != nil { - return fmt.Errorf("%s | %w", "Index", err) - } - } - - } - } - return nil -} diff --git a/typedapi/security/getprivileges/get_privileges.go b/typedapi/security/getprivileges/get_privileges.go index df8290b6de..6c908391f2 100644 --- a/typedapi/security/getprivileges/get_privileges.go +++ b/typedapi/security/getprivileges/get_privileges.go @@ -16,9 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves application privileges. +// Get application privileges. 
+// +// To use this API, you must have one of the following privileges: +// +// * The `read_security` cluster privilege (or a greater privilege such as +// `manage_security` or `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. package getprivileges import ( @@ -77,7 +84,14 @@ func NewGetPrivilegesFunc(tp elastictransport.Interface) NewGetPrivileges { } } -// Retrieves application privileges. +// Get application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `read_security` cluster privilege (or a greater privilege such as +// `manage_security` or `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html func New(tp elastictransport.Interface) *GetPrivileges { @@ -317,7 +331,10 @@ func (r *GetPrivileges) Header(key, value string) *GetPrivileges { return r } -// Application Application name +// Application The name of the application. +// Application privileges are always associated with exactly one application. +// If you do not specify this parameter, the API returns information about all +// privileges for all applications. // API Name: application func (r *GetPrivileges) Application(application string) *GetPrivileges { r.paramSet |= applicationMask @@ -326,7 +343,9 @@ func (r *GetPrivileges) Application(application string) *GetPrivileges { return r } -// Name Privilege name +// Name The name of the privilege. +// If you do not specify this parameter, the API returns information about all +// privileges for the requested application. 
// API Name: name func (r *GetPrivileges) Name(name string) *GetPrivileges { r.paramSet |= nameMask diff --git a/typedapi/security/getprivileges/response.go b/typedapi/security/getprivileges/response.go index 8429b310e0..377180f390 100644 --- a/typedapi/security/getprivileges/response.go +++ b/typedapi/security/getprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_privileges/SecurityGetPrivilegesResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_privileges/SecurityGetPrivilegesResponse.ts#L23-L29 type Response map[string]map[string]types.PrivilegesActions diff --git a/typedapi/security/getrole/get_role.go b/typedapi/security/getrole/get_role.go index f8bb3b83c6..81cf741521 100644 --- a/typedapi/security/getrole/get_role.go +++ b/typedapi/security/getrole/get_role.go @@ -16,8 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Get roles. +// +// Get roles in the native realm. // The role management APIs are generally the preferred way to manage roles, // rather than using file-based role management. // The get roles API cannot retrieve roles that are defined in roles files. 
@@ -76,6 +79,9 @@ func NewGetRoleFunc(tp elastictransport.Interface) NewGetRole { } } +// Get roles. +// +// Get roles in the native realm. // The role management APIs are generally the preferred way to manage roles, // rather than using file-based role management. // The get roles API cannot retrieve roles that are defined in roles files. @@ -299,9 +305,10 @@ func (r *GetRole) Header(key, value string) *GetRole { return r } -// Name The name of the role. You can specify multiple roles as a comma-separated -// list. If you do not specify this parameter, the API returns information about -// all roles. +// Name The name of the role. +// You can specify multiple roles as a comma-separated list. +// If you do not specify this parameter, the API returns information about all +// roles. // API Name: name func (r *GetRole) Name(name string) *GetRole { r.paramSet |= nameMask diff --git a/typedapi/security/getrole/response.go b/typedapi/security/getrole/response.go index 38db853ad7..385f1aab86 100644 --- a/typedapi/security/getrole/response.go +++ b/typedapi/security/getrole/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getrole @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrole // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_role/SecurityGetRoleResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_role/SecurityGetRoleResponse.ts#L23-L31 type Response map[string]types.Role diff --git a/typedapi/security/getrolemapping/get_role_mapping.go b/typedapi/security/getrolemapping/get_role_mapping.go index 45816fb6c2..51eed97e78 100644 --- a/typedapi/security/getrolemapping/get_role_mapping.go +++ b/typedapi/security/getrolemapping/get_role_mapping.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves role mappings. +// Get role mappings. +// +// Role mappings define which roles are assigned to each user. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. +// The get role mappings API cannot retrieve role mappings that are defined in +// role mapping files. package getrolemapping import ( @@ -74,7 +80,13 @@ func NewGetRoleMappingFunc(tp elastictransport.Interface) NewGetRoleMapping { } } -// Retrieves role mappings. +// Get role mappings. +// +// Role mappings define which roles are assigned to each user. 
+// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. +// The get role mappings API cannot retrieve role mappings that are defined in +// role mapping files. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html func New(tp elastictransport.Interface) *GetRoleMapping { diff --git a/typedapi/security/getrolemapping/response.go b/typedapi/security/getrolemapping/response.go index c7bf97613a..77356c0094 100644 --- a/typedapi/security/getrolemapping/response.go +++ b/typedapi/security/getrolemapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getrolemapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrolemapping // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_role_mapping/SecurityGetRoleMappingResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_role_mapping/SecurityGetRoleMappingResponse.ts#L23-L29 type Response map[string]types.SecurityRoleMapping diff --git a/typedapi/security/getserviceaccounts/get_service_accounts.go b/typedapi/security/getserviceaccounts/get_service_accounts.go index 89b68adb8e..8d92567132 100644 --- a/typedapi/security/getserviceaccounts/get_service_accounts.go +++ b/typedapi/security/getserviceaccounts/get_service_accounts.go @@ -16,10 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// This API returns a list of service accounts that match the provided path -// parameter(s). +// Get service accounts. +// +// Get a list of service accounts that match the provided path parameters. +// +// NOTE: Currently, only the `elastic/fleet-server` service account is +// available. package getserviceaccounts import ( @@ -78,8 +82,12 @@ func NewGetServiceAccountsFunc(tp elastictransport.Interface) NewGetServiceAccou } } -// This API returns a list of service accounts that match the provided path -// parameter(s). +// Get service accounts. +// +// Get a list of service accounts that match the provided path parameters. +// +// NOTE: Currently, only the `elastic/fleet-server` service account is +// available. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-accounts.html func New(tp elastictransport.Interface) *GetServiceAccounts { @@ -319,9 +327,9 @@ func (r *GetServiceAccounts) Header(key, value string) *GetServiceAccounts { return r } -// Namespace Name of the namespace. Omit this parameter to retrieve information about all -// service accounts. If you omit this parameter, you must also omit the -// `service` parameter. +// Namespace The name of the namespace. +// Omit this parameter to retrieve information about all service accounts. +// If you omit this parameter, you must also omit the `service` parameter. // API Name: namespace func (r *GetServiceAccounts) Namespace(namespace string) *GetServiceAccounts { r.paramSet |= namespaceMask @@ -330,8 +338,9 @@ func (r *GetServiceAccounts) Namespace(namespace string) *GetServiceAccounts { return r } -// Service Name of the service name. Omit this parameter to retrieve information about -// all service accounts that belong to the specified `namespace`. 
+// Service The service name. +// Omit this parameter to retrieve information about all service accounts that +// belong to the specified `namespace`. // API Name: service func (r *GetServiceAccounts) Service(service string) *GetServiceAccounts { r.paramSet |= serviceMask diff --git a/typedapi/security/getserviceaccounts/response.go b/typedapi/security/getserviceaccounts/response.go index 02ce3fe883..b18c29adae 100644 --- a/typedapi/security/getserviceaccounts/response.go +++ b/typedapi/security/getserviceaccounts/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getserviceaccounts @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getserviceaccounts // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_service_accounts/GetServiceAccountsResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_service_accounts/GetServiceAccountsResponse.ts#L23-L29 type Response map[string]types.RoleDescriptorWrapper diff --git a/typedapi/security/getservicecredentials/get_service_credentials.go b/typedapi/security/getservicecredentials/get_service_credentials.go index d73ddaf739..894e1f72eb 100644 --- a/typedapi/security/getservicecredentials/get_service_credentials.go +++ b/typedapi/security/getservicecredentials/get_service_credentials.go @@ -16,9 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves information of all service credentials for a service account. +// Get service account credentials. +// +// To use this API, you must have at least the `read_security` cluster privilege +// (or a greater privilege such as `manage_service_account` or +// `manage_security`). +// +// The response includes service account tokens that were created with the +// create service account tokens API as well as file-backed tokens from all +// nodes of the cluster. +// +// NOTE: For tokens backed by the `service_tokens` file, the API collects them +// from all nodes of the cluster. +// Tokens with the same name from different nodes are assumed to be the same +// token and are only counted once towards the total number of service tokens. package getservicecredentials import ( @@ -81,7 +94,20 @@ func NewGetServiceCredentialsFunc(tp elastictransport.Interface) NewGetServiceCr } } -// Retrieves information of all service credentials for a service account. +// Get service account credentials. +// +// To use this API, you must have at least the `read_security` cluster privilege +// (or a greater privilege such as `manage_service_account` or +// `manage_security`). +// +// The response includes service account tokens that were created with the +// create service account tokens API as well as file-backed tokens from all +// nodes of the cluster. +// +// NOTE: For tokens backed by the `service_tokens` file, the API collects them +// from all nodes of the cluster. +// Tokens with the same name from different nodes are assumed to be the same +// token and are only counted once towards the total number of service tokens. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-credentials.html func New(tp elastictransport.Interface) *GetServiceCredentials { @@ -303,7 +329,7 @@ func (r *GetServiceCredentials) Header(key, value string) *GetServiceCredentials return r } -// Namespace Name of the namespace. +// Namespace The name of the namespace. // API Name: namespace func (r *GetServiceCredentials) _namespace(namespace string) *GetServiceCredentials { r.paramSet |= namespaceMask @@ -312,7 +338,7 @@ func (r *GetServiceCredentials) _namespace(namespace string) *GetServiceCredenti return r } -// Service Name of the service name. +// Service The service name. // API Name: service func (r *GetServiceCredentials) _service(service string) *GetServiceCredentials { r.paramSet |= serviceMask diff --git a/typedapi/security/getservicecredentials/response.go b/typedapi/security/getservicecredentials/response.go index 2cd5119312..91edcfdf0d 100644 --- a/typedapi/security/getservicecredentials/response.go +++ b/typedapi/security/getservicecredentials/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getservicecredentials @@ -26,10 +26,10 @@ import ( // Response holds the response body struct for the package getservicecredentials // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_service_credentials/GetServiceCredentialsResponse.ts#L25-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_service_credentials/GetServiceCredentialsResponse.ts#L25-L34 type Response struct { Count int `json:"count"` - // NodesCredentials Contains service account credentials collected from all nodes of the cluster + // NodesCredentials Service account credentials collected from all nodes of the cluster. NodesCredentials types.NodesCredentials `json:"nodes_credentials"` ServiceAccount string `json:"service_account"` Tokens map[string]types.Metadata `json:"tokens"` diff --git a/typedapi/security/getsettings/get_settings.go b/typedapi/security/getsettings/get_settings.go index ae3301a8fd..dc298137b3 100644 --- a/typedapi/security/getsettings/get_settings.go +++ b/typedapi/security/getsettings/get_settings.go @@ -16,21 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieve settings for the security system indices +// Get security index settings. +// +// Get the user-configurable settings for the security internal index +// (`.security` and associated indices). 
+// Only a subset of the index settings — those that are user-configurable—will +// be shown. +// This includes: +// +// * `index.auto_expand_replicas` +// * `index.number_of_replicas` package getsettings import ( "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -65,7 +77,16 @@ func NewGetSettingsFunc(tp elastictransport.Interface) NewGetSettings { } } -// Retrieve settings for the security system indices +// Get security index settings. +// +// Get the user-configurable settings for the security internal index +// (`.security` and associated indices). +// Only a subset of the index settings — those that are user-configurable—will +// be shown. +// This includes: +// +// * `index.auto_expand_replicas` +// * `index.number_of_replicas` // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-settings.html func New(tp elastictransport.Interface) *GetSettings { @@ -180,8 +201,57 @@ func (r GetSettings) Perform(providedCtx context.Context) (*http.Response, error } // Do runs the request through the transport, handle the response and returns a getsettings.Response -func (r GetSettings) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) +func (r GetSettings) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, 
err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. @@ -229,3 +299,57 @@ func (r *GetSettings) Header(key, value string) *GetSettings { return r } + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetSettings) MasterTimeout(duration string) *GetSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetSettings) ErrorTrace(errortrace bool) *GetSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *GetSettings) FilterPath(filterpaths ...string) *GetSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetSettings) Human(human bool) *GetSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetSettings) Pretty(pretty bool) *GetSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/security/getsettings/response.go b/typedapi/security/getsettings/response.go new file mode 100644 index 0000000000..eb4f199680 --- /dev/null +++ b/typedapi/security/getsettings/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package getsettings + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package getsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_settings/SecurityGetSettingsResponse.ts#L21-L36 +type Response struct { + + // Security Settings for the index used for most security configuration, including native + // realm users and roles configured with the API. + Security types.SecuritySettings `json:"security"` + // SecurityProfile Settings for the index used to store profile information. + SecurityProfile types.SecuritySettings `json:"security-profile"` + // SecurityTokens Settings for the index used to store tokens. + SecurityTokens types.SecuritySettings `json:"security-tokens"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/gettoken/get_token.go b/typedapi/security/gettoken/get_token.go index 6620750132..1c8af36655 100644 --- a/typedapi/security/gettoken/get_token.go +++ b/typedapi/security/gettoken/get_token.go @@ -16,9 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a bearer token for access without requiring basic authentication. +// Get a token. +// +// Create a bearer token for access without requiring basic authentication. 
+// The tokens are created by the Elasticsearch Token Service, which is +// automatically enabled when you configure TLS on the HTTP interface. +// Alternatively, you can explicitly enable the +// `xpack.security.authc.token.enabled` setting. +// When you are running in production mode, a bootstrap check prevents you from +// enabling the token service unless you also enable TLS on the HTTP interface. +// +// The get token API takes the same parameters as a typical OAuth 2.0 token API +// except for the use of a JSON request body. +// +// A successful get token API call returns a JSON structure that contains the +// access token, the amount of time (seconds) that the token expires in, the +// type, and the scope if available. +// +// The tokens returned by the get token API have a finite period of time for +// which they are valid and after that time period, they can no longer be used. +// That time period is defined by the `xpack.security.authc.token.timeout` +// setting. +// If you want to invalidate a token immediately, you can do so by using the +// invalidate token API. package gettoken import ( @@ -74,7 +96,29 @@ func NewGetTokenFunc(tp elastictransport.Interface) NewGetToken { } } -// Creates a bearer token for access without requiring basic authentication. +// Get a token. +// +// Create a bearer token for access without requiring basic authentication. +// The tokens are created by the Elasticsearch Token Service, which is +// automatically enabled when you configure TLS on the HTTP interface. +// Alternatively, you can explicitly enable the +// `xpack.security.authc.token.enabled` setting. +// When you are running in production mode, a bootstrap check prevents you from +// enabling the token service unless you also enable TLS on the HTTP interface. +// +// The get token API takes the same parameters as a typical OAuth 2.0 token API +// except for the use of a JSON request body. 
+// +// A successful get token API call returns a JSON structure that contains the +// access token, the amount of time (seconds) that the token expires in, the +// type, and the scope if available. +// +// The tokens returned by the get token API have a finite period of time for +// which they are valid and after that time period, they can no longer be used. +// That time period is defined by the `xpack.security.authc.token.timeout` +// setting. +// If you want to invalidate a token immediately, you can do so by using the +// invalidate token API. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-token.html func New(tp elastictransport.Interface) *GetToken { @@ -84,8 +128,6 @@ func New(tp elastictransport.Interface) *GetToken { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -339,46 +381,90 @@ func (r *GetToken) Pretty(pretty bool) *GetToken { return r } +// The type of grant. +// Supported grant types are: `password`, `_kerberos`, `client_credentials`, and +// `refresh_token`. // API name: grant_type func (r *GetToken) GrantType(granttype accesstokengranttype.AccessTokenGrantType) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.GrantType = &granttype - return r } +// The base64 encoded kerberos ticket. +// If you specify the `_kerberos` grant type, this parameter is required. +// This parameter is not valid with any other supported grant type. // API name: kerberos_ticket func (r *GetToken) KerberosTicket(kerberosticket string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.KerberosTicket = &kerberosticket return r } +// The user's password. +// If you specify the `password` grant type, this parameter is required. 
+// This parameter is not valid with any other supported grant type. // API name: password func (r *GetToken) Password(password string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Password = &password return r } +// The string that was returned when you created the token, which enables you to +// extend its life. +// If you specify the `refresh_token` grant type, this parameter is required. +// This parameter is not valid with any other supported grant type. // API name: refresh_token func (r *GetToken) RefreshToken(refreshtoken string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RefreshToken = &refreshtoken return r } +// The scope of the token. +// Currently tokens are only issued for a scope of FULL regardless of the value +// sent with the request. // API name: scope func (r *GetToken) Scope(scope string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Scope = &scope return r } +// The username that identifies the user. +// If you specify the `password` grant type, this parameter is required. +// This parameter is not valid with any other supported grant type. // API name: username func (r *GetToken) Username(username string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Username = &username return r diff --git a/typedapi/security/gettoken/request.go b/typedapi/security/gettoken/request.go index 73c317df71..87c43abb5d 100644 --- a/typedapi/security/gettoken/request.go +++ b/typedapi/security/gettoken/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package gettoken @@ -33,14 +33,34 @@ import ( // Request holds the request body struct for the package gettoken // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_token/GetUserAccessTokenRequest.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_token/GetUserAccessTokenRequest.ts#L25-L90 type Request struct { - GrantType *accesstokengranttype.AccessTokenGrantType `json:"grant_type,omitempty"` - KerberosTicket *string `json:"kerberos_ticket,omitempty"` - Password *string `json:"password,omitempty"` - RefreshToken *string `json:"refresh_token,omitempty"` - Scope *string `json:"scope,omitempty"` - Username *string `json:"username,omitempty"` + + // GrantType The type of grant. + // Supported grant types are: `password`, `_kerberos`, `client_credentials`, and + // `refresh_token`. + GrantType *accesstokengranttype.AccessTokenGrantType `json:"grant_type,omitempty"` + // KerberosTicket The base64 encoded kerberos ticket. + // If you specify the `_kerberos` grant type, this parameter is required. + // This parameter is not valid with any other supported grant type. + KerberosTicket *string `json:"kerberos_ticket,omitempty"` + // Password The user's password. + // If you specify the `password` grant type, this parameter is required. + // This parameter is not valid with any other supported grant type. + Password *string `json:"password,omitempty"` + // RefreshToken The string that was returned when you created the token, which enables you to + // extend its life. + // If you specify the `refresh_token` grant type, this parameter is required. 
+ // This parameter is not valid with any other supported grant type. + RefreshToken *string `json:"refresh_token,omitempty"` + // Scope The scope of the token. + // Currently tokens are only issued for a scope of FULL regardless of the value + // sent with the request. + Scope *string `json:"scope,omitempty"` + // Username The username that identifies the user. + // If you specify the `password` grant type, this parameter is required. + // This parameter is not valid with any other supported grant type. + Username *string `json:"username,omitempty"` } // NewRequest returns a Request diff --git a/typedapi/security/gettoken/response.go b/typedapi/security/gettoken/response.go index 94a37cafc0..186a20d02d 100644 --- a/typedapi/security/gettoken/response.go +++ b/typedapi/security/gettoken/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package gettoken @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettoken // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_token/GetUserAccessTokenResponse.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_token/GetUserAccessTokenResponse.ts#L23-L33 type Response struct { AccessToken string `json:"access_token"` Authentication types.AuthenticatedUser `json:"authentication"` diff --git a/typedapi/security/getuser/get_user.go b/typedapi/security/getuser/get_user.go index f344c0addc..f58e036592 100644 --- a/typedapi/security/getuser/get_user.go +++ b/typedapi/security/getuser/get_user.go @@ -16,9 +16,11 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves information about users in the native realm and built-in users. +// Get users. +// +// Get information about users in the native realm and built-in users. package getuser import ( @@ -74,7 +76,9 @@ func NewGetUserFunc(tp elastictransport.Interface) NewGetUser { } } -// Retrieves information about users in the native realm and built-in users. +// Get users. +// +// Get information about users in the native realm and built-in users. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html func New(tp elastictransport.Interface) *GetUser { @@ -306,7 +310,8 @@ func (r *GetUser) Username(usernames ...string) *GetUser { return r } -// WithProfileUid If true will return the User Profile ID for a user, if any. +// WithProfileUid Determines whether to retrieve the user profile UID, if it exists, for the +// users. // API name: with_profile_uid func (r *GetUser) WithProfileUid(withprofileuid bool) *GetUser { r.values.Set("with_profile_uid", strconv.FormatBool(withprofileuid)) diff --git a/typedapi/security/getuser/response.go b/typedapi/security/getuser/response.go index 2850dbf4f5..ef189d8c8a 100644 --- a/typedapi/security/getuser/response.go +++ b/typedapi/security/getuser/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getuser @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getuser // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_user/SecurityGetUserResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_user/SecurityGetUserResponse.ts#L23-L30 type Response map[string]types.User diff --git a/typedapi/security/getuserprivileges/get_user_privileges.go b/typedapi/security/getuserprivileges/get_user_privileges.go index c943cfadd9..b8443336c7 100644 --- a/typedapi/security/getuserprivileges/get_user_privileges.go +++ b/typedapi/security/getuserprivileges/get_user_privileges.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves security privileges for the logged in user. +// Get user privileges. +// +// Get the security privileges for the logged in user. +// All users can use this API, but only to determine their own privileges. +// To check the privileges of other users, you must use the run as feature. +// To check whether a user has a specific list of privileges, use the has +// privileges API. package getuserprivileges import ( @@ -68,7 +74,13 @@ func NewGetUserPrivilegesFunc(tp elastictransport.Interface) NewGetUserPrivilege } } -// Retrieves security privileges for the logged in user. +// Get user privileges. +// +// Get the security privileges for the logged in user. 
+// All users can use this API, but only to determine their own privileges. +// To check the privileges of other users, you must use the run as feature. +// To check whether a user has a specific list of privileges, use the has +// privileges API. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-privileges.html func New(tp elastictransport.Interface) *GetUserPrivileges { diff --git a/typedapi/security/getuserprivileges/response.go b/typedapi/security/getuserprivileges/response.go index 2b36eb81db..6e5afb6f61 100644 --- a/typedapi/security/getuserprivileges/response.go +++ b/typedapi/security/getuserprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getuserprivileges @@ -26,13 +26,15 @@ import ( // Response holds the response body struct for the package getuserprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_user_privileges/SecurityGetUserPrivilegesResponse.ts#L27-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_user_privileges/SecurityGetUserPrivilegesResponse.ts#L28-L38 type Response struct { - Applications []types.ApplicationPrivileges `json:"applications"` - Cluster []string `json:"cluster"` - Global []types.GlobalPrivilege `json:"global"` - Indices []types.UserIndicesPrivileges `json:"indices"` - RunAs []string `json:"run_as"` + Applications []types.ApplicationPrivileges `json:"applications"` + Cluster []string `json:"cluster"` + Global []types.GlobalPrivilege `json:"global"` + Indices []types.UserIndicesPrivileges `json:"indices"` + 
RemoteCluster []types.RemoteClusterPrivileges `json:"remote_cluster,omitempty"` + RemoteIndices []types.RemoteUserIndicesPrivileges `json:"remote_indices,omitempty"` + RunAs []string `json:"run_as"` } // NewResponse returns a Response diff --git a/typedapi/security/getuserprofile/get_user_profile.go b/typedapi/security/getuserprofile/get_user_profile.go index 475c8d3971..0326d6dbb3 100644 --- a/typedapi/security/getuserprofile/get_user_profile.go +++ b/typedapi/security/getuserprofile/get_user_profile.go @@ -16,9 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves a user's profile using the unique profile ID. +// Get a user profile. +// +// Get a user's profile using the unique profile ID. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. package getuserprofile import ( @@ -76,7 +84,15 @@ func NewGetUserProfileFunc(tp elastictransport.Interface) NewGetUserProfile { } } -// Retrieves a user's profile using the unique profile ID. +// Get a user profile. +// +// Get a user's profile using the unique profile ID. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-profile.html func New(tp elastictransport.Interface) *GetUserProfile { @@ -299,9 +315,11 @@ func (r *GetUserProfile) _uid(uids ...string) *GetUserProfile { return r } -// Data List of filters for the `data` field of the profile document. -// To return all content use `data=*`. To return a subset of content -// use `data=` to retrieve content nested under the specified ``. +// Data A comma-separated list of filters for the `data` field of the profile +// document. +// To return all content use `data=*`. +// To return a subset of content use `data=` to retrieve content nested +// under the specified ``. // By default returns no `data` content. // API name: data func (r *GetUserProfile) Data(data ...string) *GetUserProfile { diff --git a/typedapi/security/getuserprofile/response.go b/typedapi/security/getuserprofile/response.go index e5fb4380af..7dc268ded3 100644 --- a/typedapi/security/getuserprofile/response.go +++ b/typedapi/security/getuserprofile/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getuserprofile @@ -26,9 +26,15 @@ import ( // Response holds the response body struct for the package getuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_user_profile/Response.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_user_profile/Response.ts#L23-L33 type Response struct { - Errors *types.GetUserProfileErrors `json:"errors,omitempty"` + Errors *types.GetUserProfileErrors `json:"errors,omitempty"` + // Profiles A successful call returns the JSON representation of the user profile and its + // internal versioning numbers. + // The API returns an empty object if no profile document is found for the + // provided `uid`. + // The content of the data field is not returned by default to avoid + // deserializing a potential large payload. Profiles []types.UserProfileWithMetadata `json:"profiles"` } diff --git a/typedapi/security/grantapikey/grant_api_key.go b/typedapi/security/grantapikey/grant_api_key.go index b2e7177777..47d3ca0ef5 100644 --- a/typedapi/security/grantapikey/grant_api_key.go +++ b/typedapi/security/grantapikey/grant_api_key.go @@ -16,16 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Creates an API key on behalf of another user. -// This API is similar to Create API keys, however it creates the API key for a -// user that is different than the user that runs the API. 
-// The caller must have authentication credentials (either an access token, or a -// username and password) for the user on whose behalf the API key will be -// created. -// It is not possible to use this API to create an API key without that user’s +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Grant an API key. +// +// Create an API key on behalf of another user. +// This API is similar to the create API keys API, however it creates the API +// key for a user that is different than the user that runs the API. +// The caller must have authentication credentials for the user on whose behalf +// the API key will be created. +// It is not possible to use this API to create an API key without that user's // credentials. +// The supported user authentication credential types are: +// +// * username and password +// * Elasticsearch access tokens +// * JWTs +// // The user, for whom the authentication credentials is provided, can optionally // "run as" (impersonate) another user. // In this case, the API key will be created on behalf of the impersonated user. @@ -33,6 +40,8 @@ // This API is intended be used by applications that need to create and manage // API keys for end users, but cannot guarantee that those users have permission // to create API keys on their own behalf. +// The API keys are created by the Elasticsearch API key service, which is +// automatically enabled. // // A successful grant API key API call returns a JSON structure that contains // the API key, its unique id, and its name. @@ -96,14 +105,21 @@ func NewGrantApiKeyFunc(tp elastictransport.Interface) NewGrantApiKey { } } -// Creates an API key on behalf of another user. -// This API is similar to Create API keys, however it creates the API key for a -// user that is different than the user that runs the API. 
-// The caller must have authentication credentials (either an access token, or a -// username and password) for the user on whose behalf the API key will be -// created. -// It is not possible to use this API to create an API key without that user’s +// Grant an API key. +// +// Create an API key on behalf of another user. +// This API is similar to the create API keys API, however it creates the API +// key for a user that is different than the user that runs the API. +// The caller must have authentication credentials for the user on whose behalf +// the API key will be created. +// It is not possible to use this API to create an API key without that user's // credentials. +// The supported user authentication credential types are: +// +// * username and password +// * Elasticsearch access tokens +// * JWTs +// // The user, for whom the authentication credentials is provided, can optionally // "run as" (impersonate) another user. // In this case, the API key will be created on behalf of the impersonated user. @@ -111,6 +127,8 @@ func NewGrantApiKeyFunc(tp elastictransport.Interface) NewGrantApiKey { // This API is intended be used by applications that need to create and manage // API keys for end users, but cannot guarantee that those users have permission // to create API keys on their own behalf. +// The API keys are created by the Elasticsearch API key service, which is +// automatically enabled. // // A successful grant API key API call returns a JSON structure that contains // the API key, its unique id, and its name. @@ -128,8 +146,6 @@ func New(tp elastictransport.Interface) *GrantApiKey { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -383,57 +399,83 @@ func (r *GrantApiKey) Pretty(pretty bool) *GrantApiKey { return r } -// AccessToken The user’s access token. +// The user's access token. 
// If you specify the `access_token` grant type, this parameter is required. // It is not valid with other grant types. // API name: access_token func (r *GrantApiKey) AccessToken(accesstoken string) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.AccessToken = &accesstoken return r } -// ApiKey Defines the API key. +// The API key. // API name: api_key -func (r *GrantApiKey) ApiKey(apikey *types.GrantApiKey) *GrantApiKey { +func (r *GrantApiKey) ApiKey(apikey types.GrantApiKeyVariant) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ApiKey = *apikey + r.req.ApiKey = *apikey.GrantApiKeyCaster() return r } -// GrantType The type of grant. Supported grant types are: `access_token`, `password`. +// The type of grant. Supported grant types are: `access_token`, `password`. // API name: grant_type func (r *GrantApiKey) GrantType(granttype apikeygranttype.ApiKeyGrantType) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.GrantType = granttype - return r } -// Password The user’s password. If you specify the `password` grant type, this parameter -// is required. +// The user's password. +// If you specify the `password` grant type, this parameter is required. // It is not valid with other grant types. // API name: password func (r *GrantApiKey) Password(password string) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Password = &password return r } -// RunAs The name of the user to be impersonated. +// The name of the user to be impersonated. 
// API name: run_as func (r *GrantApiKey) RunAs(username string) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RunAs = &username return r } -// Username The user name that identifies the user. +// The user name that identifies the user. // If you specify the `password` grant type, this parameter is required. // It is not valid with other grant types. // API name: username func (r *GrantApiKey) Username(username string) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Username = &username return r diff --git a/typedapi/security/grantapikey/request.go b/typedapi/security/grantapikey/request.go index cb7fe2d3dc..b75a3498a3 100644 --- a/typedapi/security/grantapikey/request.go +++ b/typedapi/security/grantapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package grantapikey @@ -34,19 +34,19 @@ import ( // Request holds the request body struct for the package grantapikey // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/grant_api_key/SecurityGrantApiKeyRequest.ts#L24-L75 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/grant_api_key/SecurityGrantApiKeyRequest.ts#L24-L92 type Request struct { - // AccessToken The user’s access token. + // AccessToken The user's access token. // If you specify the `access_token` grant type, this parameter is required. // It is not valid with other grant types. 
AccessToken *string `json:"access_token,omitempty"` - // ApiKey Defines the API key. + // ApiKey The API key. ApiKey types.GrantApiKey `json:"api_key"` // GrantType The type of grant. Supported grant types are: `access_token`, `password`. GrantType apikeygranttype.ApiKeyGrantType `json:"grant_type"` - // Password The user’s password. If you specify the `password` grant type, this parameter - // is required. + // Password The user's password. + // If you specify the `password` grant type, this parameter is required. // It is not valid with other grant types. Password *string `json:"password,omitempty"` // RunAs The name of the user to be impersonated. diff --git a/typedapi/security/grantapikey/response.go b/typedapi/security/grantapikey/response.go index 8184fc2f2a..28bcc11ace 100644 --- a/typedapi/security/grantapikey/response.go +++ b/typedapi/security/grantapikey/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package grantapikey // Response holds the response body struct for the package grantapikey // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/grant_api_key/SecurityGrantApiKeyResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/grant_api_key/SecurityGrantApiKeyResponse.ts#L23-L31 type Response struct { ApiKey string `json:"api_key"` Encoded string `json:"encoded"` diff --git a/typedapi/security/hasprivileges/has_privileges.go b/typedapi/security/hasprivileges/has_privileges.go index 304d944fba..86e594909a 100644 --- a/typedapi/security/hasprivileges/has_privileges.go +++ 
b/typedapi/security/hasprivileges/has_privileges.go @@ -16,10 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Check user privileges. -// Determines whether the specified user has a specified list of privileges. +// +// Determine whether the specified user has a specified list of privileges. +// All users can use this API, but only to determine their own privileges. +// To check the privileges of other users, you must use the run as feature. package hasprivileges import ( @@ -82,7 +85,10 @@ func NewHasPrivilegesFunc(tp elastictransport.Interface) NewHasPrivileges { } // Check user privileges. -// Determines whether the specified user has a specified list of privileges. +// +// Determine whether the specified user has a specified list of privileges. +// All users can use this API, but only to determine their own privileges. +// To check the privileges of other users, you must use the run as feature. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges.html func New(tp elastictransport.Interface) *HasPrivileges { @@ -92,8 +98,6 @@ func New(tp elastictransport.Interface) *HasPrivileges { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -372,23 +376,44 @@ func (r *HasPrivileges) Pretty(pretty bool) *HasPrivileges { } // API name: application -func (r *HasPrivileges) Application(applications ...types.ApplicationPrivilegesCheck) *HasPrivileges { - r.req.Application = applications +func (r *HasPrivileges) Application(applications ...types.ApplicationPrivilegesCheckVariant) *HasPrivileges { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range applications { + + r.req.Application = append(r.req.Application, *v.ApplicationPrivilegesCheckCaster()) + } return r } -// Cluster A list of the cluster privileges that you want to check. +// A list of the cluster privileges that you want to check. 
// API name: cluster func (r *HasPrivileges) Cluster(clusters ...clusterprivilege.ClusterPrivilege) *HasPrivileges { - r.req.Cluster = clusters + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range clusters { + + r.req.Cluster = append(r.req.Cluster, v) + } return r } // API name: index -func (r *HasPrivileges) Index(indices ...types.IndexPrivilegesCheck) *HasPrivileges { - r.req.Index = indices +func (r *HasPrivileges) Index(indices ...types.IndexPrivilegesCheckVariant) *HasPrivileges { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range indices { + + r.req.Index = append(r.req.Index, *v.IndexPrivilegesCheckCaster()) + } return r } diff --git a/typedapi/security/hasprivileges/request.go b/typedapi/security/hasprivileges/request.go index b31d4a4c7e..6c4eb34434 100644 --- a/typedapi/security/hasprivileges/request.go +++ b/typedapi/security/hasprivileges/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package hasprivileges @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package hasprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/has_privileges/SecurityHasPrivilegesRequest.ts#L25-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/has_privileges/SecurityHasPrivilegesRequest.ts#L25-L59 type Request struct { Application []types.ApplicationPrivilegesCheck `json:"application,omitempty"` // Cluster A list of the cluster privileges that you want to check. diff --git a/typedapi/security/hasprivileges/response.go b/typedapi/security/hasprivileges/response.go index 5f7431985f..5b31e12faa 100644 --- a/typedapi/security/hasprivileges/response.go +++ b/typedapi/security/hasprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package hasprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package hasprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/has_privileges/SecurityHasPrivilegesResponse.ts#L24-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/has_privileges/SecurityHasPrivilegesResponse.ts#L24-L35 type Response struct { Application types.ApplicationsPrivileges `json:"application"` Cluster map[string]bool `json:"cluster"` diff --git a/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go b/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go index e118828877..71728ec7e0 100644 --- a/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go +++ b/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go @@ -16,10 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Determines whether the users associated with the specified profile IDs have -// all the requested privileges. +// Check user profile privileges. +// +// Determine whether the users associated with the specified user profile IDs +// have all the requested privileges. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. 
+// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. package hasprivilegesuserprofile import ( @@ -74,8 +82,16 @@ func NewHasPrivilegesUserProfileFunc(tp elastictransport.Interface) NewHasPrivil } } -// Determines whether the users associated with the specified profile IDs have -// all the requested privileges. +// Check user profile privileges. +// +// Determine whether the users associated with the specified user profile IDs +// have all the requested privileges. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges-user-profile.html func New(tp elastictransport.Interface) *HasPrivilegesUserProfile { @@ -85,8 +101,6 @@ func New(tp elastictransport.Interface) *HasPrivilegesUserProfile { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -340,19 +354,31 @@ func (r *HasPrivilegesUserProfile) Pretty(pretty bool) *HasPrivilegesUserProfile return r } +// An object containing all the privileges to be checked. 
// API name: privileges -func (r *HasPrivilegesUserProfile) Privileges(privileges *types.PrivilegesCheck) *HasPrivilegesUserProfile { +func (r *HasPrivilegesUserProfile) Privileges(privileges types.PrivilegesCheckVariant) *HasPrivilegesUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Privileges = *privileges + r.req.Privileges = *privileges.PrivilegesCheckCaster() return r } -// Uids A list of profile IDs. The privileges are checked for associated users of the +// A list of profile IDs. The privileges are checked for associated users of the // profiles. // API name: uids func (r *HasPrivilegesUserProfile) Uids(uids ...string) *HasPrivilegesUserProfile { - r.req.Uids = uids + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range uids { + r.req.Uids = append(r.req.Uids, v) + + } return r } diff --git a/typedapi/security/hasprivilegesuserprofile/request.go b/typedapi/security/hasprivilegesuserprofile/request.go index a53bb10ac4..bebde6c7b2 100644 --- a/typedapi/security/hasprivilegesuserprofile/request.go +++ b/typedapi/security/hasprivilegesuserprofile/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package hasprivilegesuserprofile @@ -29,8 +29,10 @@ import ( // Request holds the request body struct for the package hasprivilegesuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/has_privileges_user_profile/Request.ts#L24-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/has_privileges_user_profile/Request.ts#L24-L55 type Request struct { + + // Privileges An object containing all the privileges to be checked. Privileges types.PrivilegesCheck `json:"privileges"` // Uids A list of profile IDs. The privileges are checked for associated users of the // profiles. diff --git a/typedapi/security/hasprivilegesuserprofile/response.go b/typedapi/security/hasprivilegesuserprofile/response.go index dd7d7cfbed..a9124286f2 100644 --- a/typedapi/security/hasprivilegesuserprofile/response.go +++ b/typedapi/security/hasprivilegesuserprofile/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package hasprivilegesuserprofile @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package hasprivilegesuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/has_privileges_user_profile/Response.ts#L23-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/has_privileges_user_profile/Response.ts#L23-L38 type Response struct { // Errors The subset of the requested profile IDs for which an error diff --git a/typedapi/security/invalidateapikey/invalidate_api_key.go b/typedapi/security/invalidateapikey/invalidate_api_key.go index ff74f660ff..e7c154452f 100644 --- a/typedapi/security/invalidateapikey/invalidate_api_key.go +++ b/typedapi/security/invalidateapikey/invalidate_api_key.go @@ -16,19 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Invalidate API keys. -// Invalidates one or more API keys. -// The `manage_api_key` privilege allows deleting any API keys. -// The `manage_own_api_key` only allows deleting API keys that are owned by the -// user. +// +// This API invalidates API keys created by the create API key or grant API key +// APIs. +// Invalidated API keys fail authentication, but they can still be viewed using +// the get API key information and query API key information APIs, for at least +// the configured retention period, until they are automatically deleted. 
+// +// To use this API, you must have at least the `manage_security`, +// `manage_api_key`, or `manage_own_api_key` cluster privileges. +// The `manage_security` privilege allows deleting any API key, including both +// REST and cross cluster API keys. +// The `manage_api_key` privilege allows deleting any REST API key, but not +// cross cluster API keys. +// The `manage_own_api_key` only allows deleting REST API keys that are owned by +// the user. // In addition, with the `manage_own_api_key` privilege, an invalidation request // must be issued in one of the three formats: +// // - Set the parameter `owner=true`. -// - Or, set both `username` and `realm_name` to match the user’s identity. -// - Or, if the request is issued by an API key, i.e. an API key invalidates -// itself, specify its ID in the `ids` field. +// - Or, set both `username` and `realm_name` to match the user's identity. +// - Or, if the request is issued by an API key, that is to say an API key +// invalidates itself, specify its ID in the `ids` field. package invalidateapikey import ( @@ -84,16 +96,28 @@ func NewInvalidateApiKeyFunc(tp elastictransport.Interface) NewInvalidateApiKey } // Invalidate API keys. -// Invalidates one or more API keys. -// The `manage_api_key` privilege allows deleting any API keys. -// The `manage_own_api_key` only allows deleting API keys that are owned by the -// user. +// +// This API invalidates API keys created by the create API key or grant API key +// APIs. +// Invalidated API keys fail authentication, but they can still be viewed using +// the get API key information and query API key information APIs, for at least +// the configured retention period, until they are automatically deleted. +// +// To use this API, you must have at least the `manage_security`, +// `manage_api_key`, or `manage_own_api_key` cluster privileges. +// The `manage_security` privilege allows deleting any API key, including both +// REST and cross cluster API keys. 
+// The `manage_api_key` privilege allows deleting any REST API key, but not +// cross cluster API keys. +// The `manage_own_api_key` only allows deleting REST API keys that are owned by +// the user. // In addition, with the `manage_own_api_key` privilege, an invalidation request // must be issued in one of the three formats: +// // - Set the parameter `owner=true`. -// - Or, set both `username` and `realm_name` to match the user’s identity. -// - Or, if the request is issued by an API key, i.e. an API key invalidates -// itself, specify its ID in the `ids` field. +// - Or, set both `username` and `realm_name` to match the user's identity. +// - Or, if the request is issued by an API key, that is to say an API key +// invalidates itself, specify its ID in the `ids` field. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-api-key.html func New(tp elastictransport.Interface) *InvalidateApiKey { @@ -103,8 +127,6 @@ func New(tp elastictransport.Interface) *InvalidateApiKey { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -358,57 +380,91 @@ func (r *InvalidateApiKey) Pretty(pretty bool) *InvalidateApiKey { // API name: id func (r *InvalidateApiKey) Id(id string) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Id = &id return r } -// Ids A list of API key ids. +// A list of API key ids. // This parameter cannot be used with any of `name`, `realm_name`, or // `username`. // API name: ids func (r *InvalidateApiKey) Ids(ids ...string) *InvalidateApiKey { - r.req.Ids = ids + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ids { + + r.req.Ids = append(r.req.Ids, v) + } return r } -// Name An API key name. +// An API key name. 
// This parameter cannot be used with any of `ids`, `realm_name` or `username`. // API name: name func (r *InvalidateApiKey) Name(name string) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Name = &name return r } -// Owner Can be used to query API keys owned by the currently authenticated user. +// Query API keys owned by the currently authenticated user. // The `realm_name` or `username` parameters cannot be specified when this // parameter is set to `true` as they are assumed to be the currently // authenticated ones. +// +// NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be +// specified if `owner` is `false`. // API name: owner func (r *InvalidateApiKey) Owner(owner bool) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Owner = &owner return r } -// RealmName The name of an authentication realm. +// The name of an authentication realm. // This parameter cannot be used with either `ids` or `name`, or when `owner` // flag is set to `true`. // API name: realm_name func (r *InvalidateApiKey) RealmName(realmname string) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RealmName = &realmname return r } -// Username The username of a user. -// This parameter cannot be used with either `ids` or `name`, or when `owner` +// The username of a user. +// This parameter cannot be used with either `ids` or `name` or when `owner` // flag is set to `true`. 
// API name: username func (r *InvalidateApiKey) Username(username string) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Username = &username return r diff --git a/typedapi/security/invalidateapikey/request.go b/typedapi/security/invalidateapikey/request.go index 96a5f9bd5e..63be4daeaf 100644 --- a/typedapi/security/invalidateapikey/request.go +++ b/typedapi/security/invalidateapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package invalidateapikey @@ -31,7 +31,7 @@ import ( // Request holds the request body struct for the package invalidateapikey // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/invalidate_api_key/SecurityInvalidateApiKeyRequest.ts#L23-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/invalidate_api_key/SecurityInvalidateApiKeyRequest.ts#L23-L82 type Request struct { Id *string `json:"id,omitempty"` // Ids A list of API key ids. @@ -41,17 +41,20 @@ type Request struct { // Name An API key name. // This parameter cannot be used with any of `ids`, `realm_name` or `username`. Name *string `json:"name,omitempty"` - // Owner Can be used to query API keys owned by the currently authenticated user. + // Owner Query API keys owned by the currently authenticated user. // The `realm_name` or `username` parameters cannot be specified when this // parameter is set to `true` as they are assumed to be the currently // authenticated ones. 
+ // + // NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be + // specified if `owner` is `false`. Owner *bool `json:"owner,omitempty"` // RealmName The name of an authentication realm. // This parameter cannot be used with either `ids` or `name`, or when `owner` // flag is set to `true`. RealmName *string `json:"realm_name,omitempty"` // Username The username of a user. - // This parameter cannot be used with either `ids` or `name`, or when `owner` + // This parameter cannot be used with either `ids` or `name` or when `owner` // flag is set to `true`. Username *string `json:"username,omitempty"` } diff --git a/typedapi/security/invalidateapikey/response.go b/typedapi/security/invalidateapikey/response.go index 2d66897b53..130c47d372 100644 --- a/typedapi/security/invalidateapikey/response.go +++ b/typedapi/security/invalidateapikey/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package invalidateapikey @@ -26,12 +26,18 @@ import ( // Response holds the response body struct for the package invalidateapikey // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/invalidate_api_key/SecurityInvalidateApiKeyResponse.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/invalidate_api_key/SecurityInvalidateApiKeyResponse.ts#L23-L46 type Response struct { - ErrorCount int `json:"error_count"` - ErrorDetails []types.ErrorCause `json:"error_details,omitempty"` - InvalidatedApiKeys []string `json:"invalidated_api_keys"` - PreviouslyInvalidatedApiKeys []string `json:"previously_invalidated_api_keys"` + + // ErrorCount 
The number of errors that were encountered when invalidating the API keys. + ErrorCount int `json:"error_count"` + // ErrorDetails Details about the errors. + // This field is not present in the response when `error_count` is `0`. + ErrorDetails []types.ErrorCause `json:"error_details,omitempty"` + // InvalidatedApiKeys The IDs of the API keys that were invalidated as part of this request. + InvalidatedApiKeys []string `json:"invalidated_api_keys"` + // PreviouslyInvalidatedApiKeys The IDs of the API keys that were already invalidated. + PreviouslyInvalidatedApiKeys []string `json:"previously_invalidated_api_keys"` } // NewResponse returns a Response diff --git a/typedapi/security/invalidatetoken/invalidate_token.go b/typedapi/security/invalidatetoken/invalidate_token.go index 54a5e8282a..1a63fc2f8c 100644 --- a/typedapi/security/invalidatetoken/invalidate_token.go +++ b/typedapi/security/invalidatetoken/invalidate_token.go @@ -16,9 +16,26 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Invalidates one or more access tokens or refresh tokens. +// Invalidate a token. +// +// The access tokens returned by the get token API have a finite period of time +// for which they are valid. +// After that time period, they can no longer be used. +// The time period is defined by the `xpack.security.authc.token.timeout` +// setting. +// +// The refresh tokens returned by the get token API are only valid for 24 hours. +// They can also be used exactly once. +// If you want to invalidate one or more access or refresh tokens immediately, +// use this invalidate token API. +// +// NOTE: While all parameters are optional, at least one of them is required. 
+// More specifically, either one of `token` or `refresh_token` parameters is +// required. +// If none of these two are specified, then `realm_name` and/or `username` need +// to be specified. package invalidatetoken import ( @@ -73,7 +90,24 @@ func NewInvalidateTokenFunc(tp elastictransport.Interface) NewInvalidateToken { } } -// Invalidates one or more access tokens or refresh tokens. +// Invalidate a token. +// +// The access tokens returned by the get token API have a finite period of time +// for which they are valid. +// After that time period, they can no longer be used. +// The time period is defined by the `xpack.security.authc.token.timeout` +// setting. +// +// The refresh tokens returned by the get token API are only valid for 24 hours. +// They can also be used exactly once. +// If you want to invalidate one or more access or refresh tokens immediately, +// use this invalidate token API. +// +// NOTE: While all parameters are optional, at least one of them is required. +// More specifically, either one of `token` or `refresh_token` parameters is +// required. +// If none of these two are specified, then `realm_name` and/or `username` need +// to be specified. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html func New(tp elastictransport.Interface) *InvalidateToken { @@ -83,8 +117,6 @@ func New(tp elastictransport.Interface) *InvalidateToken { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -338,31 +370,59 @@ func (r *InvalidateToken) Pretty(pretty bool) *InvalidateToken { return r } +// The name of an authentication realm. +// This parameter cannot be used with either `refresh_token` or `token`. 
// API name: realm_name func (r *InvalidateToken) RealmName(name string) *InvalidateToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RealmName = &name return r } +// A refresh token. +// This parameter cannot be used if any of `refresh_token`, `realm_name`, or +// `username` are used. // API name: refresh_token func (r *InvalidateToken) RefreshToken(refreshtoken string) *InvalidateToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RefreshToken = &refreshtoken return r } +// An access token. +// This parameter cannot be used if any of `refresh_token`, `realm_name`, or +// `username` are used. // API name: token func (r *InvalidateToken) Token(token string) *InvalidateToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Token = &token return r } +// The username of a user. +// This parameter cannot be used with either `refresh_token` or `token`. // API name: username func (r *InvalidateToken) Username(username string) *InvalidateToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Username = &username return r diff --git a/typedapi/security/invalidatetoken/request.go b/typedapi/security/invalidatetoken/request.go index aaadebc1cf..e5febcc5f5 100644 --- a/typedapi/security/invalidatetoken/request.go +++ b/typedapi/security/invalidatetoken/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package invalidatetoken @@ -31,12 +31,23 @@ import ( // Request holds the request body struct for the package invalidatetoken // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/invalidate_token/SecurityInvalidateTokenRequest.ts#L23-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/invalidate_token/SecurityInvalidateTokenRequest.ts#L23-L71 type Request struct { - RealmName *string `json:"realm_name,omitempty"` + + // RealmName The name of an authentication realm. + // This parameter cannot be used with either `refresh_token` or `token`. + RealmName *string `json:"realm_name,omitempty"` + // RefreshToken A refresh token. + // This parameter cannot be used if any of `refresh_token`, `realm_name`, or + // `username` are used. RefreshToken *string `json:"refresh_token,omitempty"` - Token *string `json:"token,omitempty"` - Username *string `json:"username,omitempty"` + // Token An access token. + // This parameter cannot be used if any of `refresh_token`, `realm_name`, or + // `username` are used. + Token *string `json:"token,omitempty"` + // Username The username of a user. + // This parameter cannot be used with either `refresh_token` or `token`. + Username *string `json:"username,omitempty"` } // NewRequest returns a Request diff --git a/typedapi/security/invalidatetoken/response.go b/typedapi/security/invalidatetoken/response.go index 62ade0c438..f80cd1f394 100644 --- a/typedapi/security/invalidatetoken/response.go +++ b/typedapi/security/invalidatetoken/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package invalidatetoken @@ -26,12 +26,18 @@ import ( // Response holds the response body struct for the package invalidatetoken // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/invalidate_token/SecurityInvalidateTokenResponse.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/invalidate_token/SecurityInvalidateTokenResponse.ts#L23-L46 type Response struct { - ErrorCount int64 `json:"error_count"` - ErrorDetails []types.ErrorCause `json:"error_details,omitempty"` - InvalidatedTokens int64 `json:"invalidated_tokens"` - PreviouslyInvalidatedTokens int64 `json:"previously_invalidated_tokens"` + + // ErrorCount The number of errors that were encountered when invalidating the tokens. + ErrorCount int64 `json:"error_count"` + // ErrorDetails Details about the errors. + // This field is not present in the response when `error_count` is `0`. + ErrorDetails []types.ErrorCause `json:"error_details,omitempty"` + // InvalidatedTokens The number of the tokens that were invalidated as part of this request. + InvalidatedTokens int64 `json:"invalidated_tokens"` + // PreviouslyInvalidatedTokens The number of tokens that were already invalidated. + PreviouslyInvalidatedTokens int64 `json:"previously_invalidated_tokens"` } // NewResponse returns a Response diff --git a/typedapi/security/oidcauthenticate/oidc_authenticate.go b/typedapi/security/oidcauthenticate/oidc_authenticate.go index 0407620045..92288abc10 100644 --- a/typedapi/security/oidcauthenticate/oidc_authenticate.go +++ b/typedapi/security/oidcauthenticate/oidc_authenticate.go @@ -16,22 +16,35 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Exchanges an OpenID Connection authentication response message for an -// Elasticsearch access token and refresh token pair +// Authenticate OpenID Connect. +// +// Exchange an OpenID Connect authentication response message for an +// Elasticsearch internal access token and refresh token that can be +// subsequently used for authentication. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. +// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. package oidcauthenticate import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -46,6 +59,10 @@ type OidcAuthenticate struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -66,8 +83,17 @@ func NewOidcAuthenticateFunc(tp elastictransport.Interface) NewOidcAuthenticate } } -// Exchanges an OpenID Connection authentication response message for an -// Elasticsearch access token and refresh token pair +// Authenticate OpenID Connect. +// +// Exchange an OpenID Connect authentication response message for an +// Elasticsearch internal access token and refresh token that can be +// subsequently used for authentication. 
+// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. +// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-authenticate.html func New(tp elastictransport.Interface) *OidcAuthenticate { @@ -75,6 +101,8 @@ func New(tp elastictransport.Interface) *OidcAuthenticate { transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -86,6 +114,21 @@ func New(tp elastictransport.Interface) *OidcAuthenticate { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *OidcAuthenticate) Raw(raw io.Reader) *OidcAuthenticate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *OidcAuthenticate) Request(req *Request) *OidcAuthenticate { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. 
func (r *OidcAuthenticate) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -95,6 +138,31 @@ func (r *OidcAuthenticate) HttpRequest(ctx context.Context) (*http.Request, erro var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for OidcAuthenticate: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -184,13 +252,7 @@ func (r OidcAuthenticate) Perform(providedCtx context.Context) (*http.Response, } // Do runs the request through the transport, handle the response and returns a oidcauthenticate.Response -func (r OidcAuthenticate) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r OidcAuthenticate) IsSuccess(providedCtx context.Context) (bool, error) { +func (r OidcAuthenticate) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -201,30 +263,46 @@ func (r OidcAuthenticate) IsSuccess(providedCtx context.Context) (bool, error) { ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the OidcAuthenticate query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err } - return false, nil + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the OidcAuthenticate headers map. 
@@ -233,3 +311,110 @@ func (r *OidcAuthenticate) Header(key, value string) *OidcAuthenticate { return r } + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *OidcAuthenticate) ErrorTrace(errortrace bool) *OidcAuthenticate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *OidcAuthenticate) FilterPath(filterpaths ...string) *OidcAuthenticate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *OidcAuthenticate) Human(human bool) *OidcAuthenticate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *OidcAuthenticate) Pretty(pretty bool) *OidcAuthenticate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Associate a client session with an ID token and mitigate replay attacks. +// This value needs to be the same as the one that was provided to the +// `/_security/oidc/prepare` API or the one that was generated by Elasticsearch +// and included in the response to that call. 
+// API name: nonce +func (r *OidcAuthenticate) Nonce(nonce string) *OidcAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Nonce = nonce + + return r +} + +// The name of the OpenID Connect realm. +// This property is useful in cases where multiple realms are defined. +// API name: realm +func (r *OidcAuthenticate) Realm(realm string) *OidcAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Realm = &realm + + return r +} + +// The URL to which the OpenID Connect Provider redirected the User Agent in +// response to an authentication request after a successful authentication. +// This URL must be provided as-is (URL encoded), taken from the body of the +// response or as the value of a location header in the response from the OpenID +// Connect Provider. +// API name: redirect_uri +func (r *OidcAuthenticate) RedirectUri(redirecturi string) *OidcAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RedirectUri = redirecturi + + return r +} + +// Maintain state between the authentication request and the response. +// This value needs to be the same as the one that was provided to the +// `/_security/oidc/prepare` API or the one that was generated by Elasticsearch +// and included in the response to that call. +// API name: state +func (r *OidcAuthenticate) State(state string) *OidcAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.State = state + + return r +} diff --git a/typedapi/security/oidcauthenticate/request.go b/typedapi/security/oidcauthenticate/request.go new file mode 100644 index 0000000000..12f6446893 --- /dev/null +++ b/typedapi/security/oidcauthenticate/request.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package oidcauthenticate + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package oidcauthenticate +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/oidc_authenticate/Request.ts#L22-L61 +type Request struct { + + // Nonce Associate a client session with an ID token and mitigate replay attacks. + // This value needs to be the same as the one that was provided to the + // `/_security/oidc/prepare` API or the one that was generated by Elasticsearch + // and included in the response to that call. + Nonce string `json:"nonce"` + // Realm The name of the OpenID Connect realm. + // This property is useful in cases where multiple realms are defined. + Realm *string `json:"realm,omitempty"` + // RedirectUri The URL to which the OpenID Connect Provider redirected the User Agent in + // response to an authentication request after a successful authentication. 
+ // This URL must be provided as-is (URL encoded), taken from the body of the + // response or as the value of a location header in the response from the OpenID + // Connect Provider. + RedirectUri string `json:"redirect_uri"` + // State Maintain state between the authentication request and the response. + // This value needs to be the same as the one that was provided to the + // `/_security/oidc/prepare` API or the one that was generated by Elasticsearch + // and included in the response to that call. + State string `json:"state"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Oidcauthenticate request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/security/oidcauthenticate/response.go b/typedapi/security/oidcauthenticate/response.go new file mode 100644 index 0000000000..08884eda5a --- /dev/null +++ b/typedapi/security/oidcauthenticate/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package oidcauthenticate + +// Response holds the response body struct for the package oidcauthenticate +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/oidc_authenticate/Response.ts#L22-L41 +type Response struct { + + // AccessToken The Elasticsearch access token. + AccessToken string `json:"access_token"` + // ExpiresIn The duration (in seconds) of the tokens. + ExpiresIn int `json:"expires_in"` + // RefreshToken The Elasticsearch refresh token. + RefreshToken string `json:"refresh_token"` + // Type The type of token. + Type string `json:"type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/oidclogout/oidc_logout.go b/typedapi/security/oidclogout/oidc_logout.go index bddb857bb7..70a3fa2a55 100644 --- a/typedapi/security/oidclogout/oidc_logout.go +++ b/typedapi/security/oidclogout/oidc_logout.go @@ -16,22 +16,39 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Invalidates a refresh token and access token that was generated from the -// OpenID Connect Authenticate API +// Logout of OpenID Connect. +// +// Invalidate an access token and a refresh token that were generated as a +// response to the `/_security/oidc/authenticate` API. 
+// +// If the OpenID Connect authentication realm in Elasticsearch is accordingly +// configured, the response to this call will contain a URI pointing to the end +// session endpoint of the OpenID Connect Provider in order to perform single +// logout. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. +// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. package oidclogout import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -46,6 +63,10 @@ type OidcLogout struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -66,8 +87,21 @@ func NewOidcLogoutFunc(tp elastictransport.Interface) NewOidcLogout { } } -// Invalidates a refresh token and access token that was generated from the -// OpenID Connect Authenticate API +// Logout of OpenID Connect. +// +// Invalidate an access token and a refresh token that were generated as a +// response to the `/_security/oidc/authenticate` API. +// +// If the OpenID Connect authentication realm in Elasticsearch is accordingly +// configured, the response to this call will contain a URI pointing to the end +// session endpoint of the OpenID Connect Provider in order to perform single +// logout. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. 
+// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-logout.html func New(tp elastictransport.Interface) *OidcLogout { @@ -75,6 +109,8 @@ func New(tp elastictransport.Interface) *OidcLogout { transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -86,6 +122,21 @@ func New(tp elastictransport.Interface) *OidcLogout { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *OidcLogout) Raw(raw io.Reader) *OidcLogout { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *OidcLogout) Request(req *Request) *OidcLogout { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. 
func (r *OidcLogout) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -95,6 +146,31 @@ func (r *OidcLogout) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for OidcLogout: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -184,13 +260,7 @@ func (r OidcLogout) Perform(providedCtx context.Context) (*http.Response, error) } // Do runs the request through the transport, handle the response and returns a oidclogout.Response -func (r OidcLogout) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r OidcLogout) IsSuccess(providedCtx context.Context) (bool, error) { +func (r OidcLogout) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -201,30 +271,46 @@ func (r OidcLogout) IsSuccess(providedCtx context.Context) (bool, error) { ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the OidcLogout query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode } - return false, nil + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the OidcLogout headers map. 
@@ -233,3 +319,73 @@ func (r *OidcLogout) Header(key, value string) *OidcLogout { return r } + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *OidcLogout) ErrorTrace(errortrace bool) *OidcLogout { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *OidcLogout) FilterPath(filterpaths ...string) *OidcLogout { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *OidcLogout) Human(human bool) *OidcLogout { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *OidcLogout) Pretty(pretty bool) *OidcLogout { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The access token to be invalidated. +// API name: access_token +func (r *OidcLogout) AccessToken(accesstoken string) *OidcLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AccessToken = accesstoken + + return r +} + +// The refresh token to be invalidated. 
+// API name: refresh_token +func (r *OidcLogout) RefreshToken(refreshtoken string) *OidcLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RefreshToken = &refreshtoken + + return r +} diff --git a/typedapi/security/oidclogout/request.go b/typedapi/security/oidclogout/request.go new file mode 100644 index 0000000000..7464cab061 --- /dev/null +++ b/typedapi/security/oidclogout/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package oidclogout + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package oidclogout +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/oidc_logout/Request.ts#L22-L52 +type Request struct { + + // AccessToken The access token to be invalidated. + AccessToken string `json:"access_token"` + // RefreshToken The refresh token to be invalidated. 
+ RefreshToken *string `json:"refresh_token,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Oidclogout request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/security/oidclogout/response.go b/typedapi/security/oidclogout/response.go new file mode 100644 index 0000000000..8edd7daa45 --- /dev/null +++ b/typedapi/security/oidclogout/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package oidclogout + +// Response holds the response body struct for the package oidclogout +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/oidc_logout/Response.ts#L20-L27 +type Response struct { + + // Redirect A URI that points to the end session endpoint of the OpenID Connect Provider + // with all the parameters of the logout request as HTTP GET parameters. + Redirect string `json:"redirect"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go b/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go index 9c031bbbbc..28a48b2a98 100644 --- a/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go +++ b/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go @@ -16,21 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates an OAuth 2.0 authentication request as a URL string +// Prepare OpenID connect authentication. +// +// Create an oAuth 2.0 authentication request as a URL string based on the +// configuration of the OpenID Connect authentication realm in Elasticsearch. +// +// The response of this API is a URL pointing to the Authorization Endpoint of +// the configured OpenID Connect Provider, which can be used to redirect the +// browser of the user in order to continue the authentication process. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. 
+// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. package oidcprepareauthentication import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -45,6 +62,10 @@ type OidcPrepareAuthentication struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -65,7 +86,20 @@ func NewOidcPrepareAuthenticationFunc(tp elastictransport.Interface) NewOidcPrep } } -// Creates an OAuth 2.0 authentication request as a URL string +// Prepare OpenID connect authentication. +// +// Create an oAuth 2.0 authentication request as a URL string based on the +// configuration of the OpenID Connect authentication realm in Elasticsearch. +// +// The response of this API is a URL pointing to the Authorization Endpoint of +// the configured OpenID Connect Provider, which can be used to redirect the +// browser of the user in order to continue the authentication process. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. +// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-prepare-authentication.html func New(tp elastictransport.Interface) *OidcPrepareAuthentication { @@ -73,6 +107,8 @@ func New(tp elastictransport.Interface) *OidcPrepareAuthentication { transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -84,6 +120,21 @@ func New(tp elastictransport.Interface) *OidcPrepareAuthentication { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *OidcPrepareAuthentication) Raw(raw io.Reader) *OidcPrepareAuthentication { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *OidcPrepareAuthentication) Request(req *Request) *OidcPrepareAuthentication { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. 
func (r *OidcPrepareAuthentication) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -93,6 +144,31 @@ func (r *OidcPrepareAuthentication) HttpRequest(ctx context.Context) (*http.Requ var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for OidcPrepareAuthentication: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -182,13 +258,7 @@ func (r OidcPrepareAuthentication) Perform(providedCtx context.Context) (*http.R } // Do runs the request through the transport, handle the response and returns a oidcprepareauthentication.Response -func (r OidcPrepareAuthentication) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r OidcPrepareAuthentication) IsSuccess(providedCtx context.Context) (bool, error) { +func (r OidcPrepareAuthentication) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -199,30 +269,46 @@ func (r OidcPrepareAuthentication) IsSuccess(providedCtx context.Context) (bool, ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the OidcPrepareAuthentication query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err } - return false, nil + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the OidcPrepareAuthentication headers map. 
@@ -231,3 +317,126 @@ func (r *OidcPrepareAuthentication) Header(key, value string) *OidcPrepareAuthen return r } + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *OidcPrepareAuthentication) ErrorTrace(errortrace bool) *OidcPrepareAuthentication { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *OidcPrepareAuthentication) FilterPath(filterpaths ...string) *OidcPrepareAuthentication { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *OidcPrepareAuthentication) Human(human bool) *OidcPrepareAuthentication { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *OidcPrepareAuthentication) Pretty(pretty bool) *OidcPrepareAuthentication { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// In the case of a third party initiated single sign on, this is the issuer +// identifier for the OP that the RP is to send the authentication request to. +// It cannot be specified when *realm* is specified. +// One of *realm* or *iss* is required. 
+// API name: iss +func (r *OidcPrepareAuthentication) Iss(iss string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Iss = &iss + + return r +} + +// In the case of a third party initiated single sign on, it is a string value +// that is included in the authentication request as the *login_hint* parameter. +// This parameter is not valid when *realm* is specified. +// API name: login_hint +func (r *OidcPrepareAuthentication) LoginHint(loginhint string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LoginHint = &loginhint + + return r +} + +// The value used to associate a client session with an ID token and to mitigate +// replay attacks. +// If the caller of the API does not provide a value, Elasticsearch will +// generate one with sufficient entropy and return it in the response. +// API name: nonce +func (r *OidcPrepareAuthentication) Nonce(nonce string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Nonce = &nonce + + return r +} + +// The name of the OpenID Connect realm in Elasticsearch the configuration of +// which should be used in order to generate the authentication request. +// It cannot be specified when *iss* is specified. +// One of *realm* or *iss* is required. +// API name: realm +func (r *OidcPrepareAuthentication) Realm(realm string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Realm = &realm + + return r +} + +// The value used to maintain state between the authentication request and the +// response, typically used as a Cross-Site Request Forgery mitigation. 
+// If the caller of the API does not provide a value, Elasticsearch will +// generate one with sufficient entropy and return it in the response. +// API name: state +func (r *OidcPrepareAuthentication) State(state string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.State = &state + + return r +} diff --git a/typedapi/security/oidcprepareauthentication/request.go b/typedapi/security/oidcprepareauthentication/request.go new file mode 100644 index 0000000000..a997f9943f --- /dev/null +++ b/typedapi/security/oidcprepareauthentication/request.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package oidcprepareauthentication + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package oidcprepareauthentication +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/oidc_prepare_authentication/Request.ts#L22-L71 +type Request struct { + + // Iss In the case of a third party initiated single sign on, this is the issuer + // identifier for the OP that the RP is to send the authentication request to. + // It cannot be specified when *realm* is specified. + // One of *realm* or *iss* is required. + Iss *string `json:"iss,omitempty"` + // LoginHint In the case of a third party initiated single sign on, it is a string value + // that is included in the authentication request as the *login_hint* parameter. + // This parameter is not valid when *realm* is specified. + LoginHint *string `json:"login_hint,omitempty"` + // Nonce The value used to associate a client session with an ID token and to mitigate + // replay attacks. + // If the caller of the API does not provide a value, Elasticsearch will + // generate one with sufficient entropy and return it in the response. + Nonce *string `json:"nonce,omitempty"` + // Realm The name of the OpenID Connect realm in Elasticsearch the configuration of + // which should be used in order to generate the authentication request. + // It cannot be specified when *iss* is specified. + // One of *realm* or *iss* is required. + Realm *string `json:"realm,omitempty"` + // State The value used to maintain state between the authentication request and the + // response, typically used as a Cross-Site Request Forgery mitigation. + // If the caller of the API does not provide a value, Elasticsearch will + // generate one with sufficient entropy and return it in the response. 
+ State *string `json:"state,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Oidcprepareauthentication request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/security/oidcprepareauthentication/response.go b/typedapi/security/oidcprepareauthentication/response.go new file mode 100644 index 0000000000..15146de353 --- /dev/null +++ b/typedapi/security/oidcprepareauthentication/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package oidcprepareauthentication + +// Response holds the response body struct for the package oidcprepareauthentication +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/oidc_prepare_authentication/Response.ts#L20-L30 +type Response struct { + Nonce string `json:"nonce"` + Realm string `json:"realm"` + // Redirect A URI that points to the authorization endpoint of the OpenID Connect + // Provider with all the parameters of the authentication request as HTTP GET + // parameters. + Redirect string `json:"redirect"` + State string `json:"state"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/putprivileges/put_privileges.go b/typedapi/security/putprivileges/put_privileges.go index d1a7453866..9beaeacfdf 100644 --- a/typedapi/security/putprivileges/put_privileges.go +++ b/typedapi/security/putprivileges/put_privileges.go @@ -16,9 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Adds or updates application privileges. +// Create or update application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_security` cluster privilege (or a greater privilege such as +// `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. +// +// Application names are formed from a prefix, with an optional suffix that +// conform to the following rules: +// +// * The prefix must begin with a lowercase ASCII letter. 
+// * The prefix must contain only ASCII letters or digits. +// * The prefix must be at least 3 characters long. +// * If the suffix exists, it must begin with either a dash `-` or `_`. +// * The suffix cannot contain any of the following characters: `\`, `/`, `*`, +// `?`, `"`, `<`, `>`, `|`, `,`, `*`. +// * No part of the name can contain whitespace. +// +// Privilege names must begin with a lowercase ASCII letter and must contain +// only ASCII letters and digits along with the characters `_`, `-`, and `.`. +// +// Action names can contain any number of printable ASCII characters and must +// contain at least one of the following characters: `/`, `*`, `:`. package putprivileges import ( @@ -74,7 +98,31 @@ func NewPutPrivilegesFunc(tp elastictransport.Interface) NewPutPrivileges { } } -// Adds or updates application privileges. +// Create or update application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_security` cluster privilege (or a greater privilege such as +// `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. +// +// Application names are formed from a prefix, with an optional suffix that +// conform to the following rules: +// +// * The prefix must begin with a lowercase ASCII letter. +// * The prefix must contain only ASCII letters or digits. +// * The prefix must be at least 3 characters long. +// * If the suffix exists, it must begin with either a dash `-` or `_`. +// * The suffix cannot contain any of the following characters: `\`, `/`, `*`, +// `?`, `"`, `<`, `>`, `|`, `,`, `*`. +// * No part of the name can contain whitespace. +// +// Privilege names must begin with a lowercase ASCII letter and must contain +// only ASCII letters and digits along with the characters `_`, `-`, and `.`. 
+// +// Action names can contain any number of printable ASCII characters and must +// contain at least one of the following characters: `/`, `*`, `:`. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-privileges.html func New(tp elastictransport.Interface) *PutPrivileges { @@ -84,8 +132,6 @@ func New(tp elastictransport.Interface) *PutPrivileges { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { diff --git a/typedapi/security/putprivileges/request.go b/typedapi/security/putprivileges/request.go index f2eb765033..f1483edbf4 100644 --- a/typedapi/security/putprivileges/request.go +++ b/typedapi/security/putprivileges/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putprivileges @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package putprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/put_privileges/SecurityPutPrivilegesRequest.ts#L25-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/put_privileges/SecurityPutPrivilegesRequest.ts#L25-L67 type Request = map[string]map[string]types.PrivilegesActions // NewRequest returns a Request diff --git a/typedapi/security/putprivileges/response.go b/typedapi/security/putprivileges/response.go index 1e686c0188..04066fc862 100644 --- a/typedapi/security/putprivileges/response.go +++ b/typedapi/security/putprivileges/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/put_privileges/SecurityPutPrivilegesResponse.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/put_privileges/SecurityPutPrivilegesResponse.ts#L23-L28 type Response map[string]map[string]types.CreatedStatus diff --git a/typedapi/security/putrole/put_role.go b/typedapi/security/putrole/put_role.go index 041ec62049..43fdfc72db 100644 --- a/typedapi/security/putrole/put_role.go +++ b/typedapi/security/putrole/put_role.go @@ -16,12 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// The role management APIs are generally the preferred way to manage roles, -// rather than using file-based role management. +// Create or update roles. +// +// The role management APIs are generally the preferred way to manage roles in +// the native realm, rather than using file-based role management. // The create or update roles API cannot update roles that are defined in roles // files. +// File-based role management is not available in Elastic Serverless. 
package putrole import ( @@ -86,10 +89,13 @@ func NewPutRoleFunc(tp elastictransport.Interface) NewPutRole { } } -// The role management APIs are generally the preferred way to manage roles, -// rather than using file-based role management. +// Create or update roles. +// +// The role management APIs are generally the preferred way to manage roles in +// the native realm, rather than using file-based role management. // The create or update roles API cannot update roles that are defined in roles // files. +// File-based role management is not available in Elastic Serverless. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role.html func New(tp elastictransport.Interface) *PutRole { @@ -99,8 +105,6 @@ func New(tp elastictransport.Interface) *PutRole { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -377,71 +381,164 @@ func (r *PutRole) Pretty(pretty bool) *PutRole { return r } -// Applications A list of application privilege entries. +// A list of application privilege entries. // API name: applications -func (r *PutRole) Applications(applications ...types.ApplicationPrivileges) *PutRole { - r.req.Applications = applications +func (r *PutRole) Applications(applications ...types.ApplicationPrivilegesVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range applications { + r.req.Applications = append(r.req.Applications, *v.ApplicationPrivilegesCaster()) + + } return r } -// Cluster A list of cluster privileges. These privileges define the cluster-level +// A list of cluster privileges. These privileges define the cluster-level // actions for users with this role. 
// API name: cluster func (r *PutRole) Cluster(clusters ...clusterprivilege.ClusterPrivilege) *PutRole { - r.req.Cluster = clusters + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range clusters { + r.req.Cluster = append(r.req.Cluster, v) + + } return r } -// Description Optional description of the role descriptor +// Optional description of the role descriptor // API name: description func (r *PutRole) Description(description string) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Global An object defining global privileges. A global privilege is a form of cluster +// An object defining global privileges. A global privilege is a form of cluster // privilege that is request-aware. Support for global privileges is currently // limited to the management of application privileges. // API name: global func (r *PutRole) Global(global map[string]json.RawMessage) *PutRole { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Global = global + return r +} +func (r *PutRole) AddGlobal(key string, value json.RawMessage) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Global == nil { + r.req.Global = make(map[string]json.RawMessage) + } else { + tmp = r.req.Global + } + + tmp[key] = value + + r.req.Global = tmp return r } -// Indices A list of indices permissions entries. +// A list of indices permissions entries. 
// API name: indices -func (r *PutRole) Indices(indices ...types.IndicesPrivileges) *PutRole { - r.req.Indices = indices +func (r *PutRole) Indices(indices ...types.IndicesPrivilegesVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range indices { + r.req.Indices = append(r.req.Indices, *v.IndicesPrivilegesCaster()) + + } return r } -// Metadata Optional metadata. Within the metadata object, keys that begin with an +// Optional metadata. Within the metadata object, keys that begin with an // underscore (`_`) are reserved for system use. // API name: metadata -func (r *PutRole) Metadata(metadata types.Metadata) *PutRole { - r.req.Metadata = metadata +func (r *PutRole) Metadata(metadata types.MetadataVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } -// RunAs A list of users that the owners of this role can impersonate. *Note*: in +// A list of remote cluster permissions entries. +// API name: remote_cluster +func (r *PutRole) RemoteCluster(remoteclusters ...types.RemoteClusterPrivilegesVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range remoteclusters { + + r.req.RemoteCluster = append(r.req.RemoteCluster, *v.RemoteClusterPrivilegesCaster()) + + } + return r +} + +// A list of remote indices permissions entries. +// +// NOTE: Remote indices are effective for remote clusters configured with the +// API key based model. +// They have no effect for remote clusters configured with the certificate based +// model. 
+// API name: remote_indices +func (r *PutRole) RemoteIndices(remoteindices ...types.RemoteIndicesPrivilegesVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range remoteindices { + + r.req.RemoteIndices = append(r.req.RemoteIndices, *v.RemoteIndicesPrivilegesCaster()) + + } + return r +} + +// A list of users that the owners of this role can impersonate. *Note*: in // Serverless, the run-as feature is disabled. For API compatibility, you can // still specify an empty `run_as` field, but a non-empty list will be rejected. // API name: run_as func (r *PutRole) RunAs(runas ...string) *PutRole { - r.req.RunAs = runas + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range runas { + + r.req.RunAs = append(r.req.RunAs, v) + } return r } -// TransientMetadata Indicates roles that might be incompatible with the current cluster license, +// Indicates roles that might be incompatible with the current cluster license, // specifically roles with document and field level security. When the cluster // license doesn’t allow certain features for a given role, this parameter is // updated dynamically to list the incompatible features. If `enabled` is @@ -449,8 +546,29 @@ func (r *PutRole) RunAs(runas ...string) *PutRole { // authenticate API. 
// API name: transient_metadata func (r *PutRole) TransientMetadata(transientmetadata map[string]json.RawMessage) *PutRole { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TransientMetadata = transientmetadata + return r +} + +func (r *PutRole) AddTransientMetadatum(key string, value json.RawMessage) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.TransientMetadata == nil { + r.req.TransientMetadata = make(map[string]json.RawMessage) + } else { + tmp = r.req.TransientMetadata + } + + tmp[key] = value + r.req.TransientMetadata = tmp return r } diff --git a/typedapi/security/putrole/request.go b/typedapi/security/putrole/request.go index e6ea674f09..1f4e972a12 100644 --- a/typedapi/security/putrole/request.go +++ b/typedapi/security/putrole/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putrole @@ -34,7 +34,7 @@ import ( // Request holds the request body struct for the package putrole // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/put_role/SecurityPutRoleRequest.ts#L30-L84 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/put_role/SecurityPutRoleRequest.ts#L32-L111 type Request struct { // Applications A list of application privilege entries. @@ -53,6 +53,15 @@ type Request struct { // Metadata Optional metadata. Within the metadata object, keys that begin with an // underscore (`_`) are reserved for system use. 
Metadata types.Metadata `json:"metadata,omitempty"` + // RemoteCluster A list of remote cluster permissions entries. + RemoteCluster []types.RemoteClusterPrivileges `json:"remote_cluster,omitempty"` + // RemoteIndices A list of remote indices permissions entries. + // + // NOTE: Remote indices are effective for remote clusters configured with the + // API key based model. + // They have no effect for remote clusters configured with the certificate based + // model. + RemoteIndices []types.RemoteIndicesPrivileges `json:"remote_indices,omitempty"` // RunAs A list of users that the owners of this role can impersonate. *Note*: in // Serverless, the run-as feature is disabled. For API compatibility, you can // still specify an empty `run_as` field, but a non-empty list will be rejected. @@ -142,6 +151,16 @@ func (s *Request) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Metadata", err) } + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + + case "remote_indices": + if err := dec.Decode(&s.RemoteIndices); err != nil { + return fmt.Errorf("%s | %w", "RemoteIndices", err) + } + case "run_as": if err := dec.Decode(&s.RunAs); err != nil { return fmt.Errorf("%s | %w", "RunAs", err) diff --git a/typedapi/security/putrole/response.go b/typedapi/security/putrole/response.go index e0f04c7e65..6aa535a0ef 100644 --- a/typedapi/security/putrole/response.go +++ b/typedapi/security/putrole/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putrole @@ -26,8 +26,10 @@ import ( // Response holds the response body struct for the package putrole // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/put_role/SecurityPutRoleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/put_role/SecurityPutRoleResponse.ts#L22-L29 type Response struct { + + // Role When an existing role is updated, `created` is set to `false`. Role types.CreatedStatus `json:"role"` } diff --git a/typedapi/security/putrolemapping/put_role_mapping.go b/typedapi/security/putrolemapping/put_role_mapping.go index 469d8a5f28..64273afd85 100644 --- a/typedapi/security/putrolemapping/put_role_mapping.go +++ b/typedapi/security/putrolemapping/put_role_mapping.go @@ -16,9 +16,47 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates and updates role mappings. +// Create or update role mappings. +// +// Role mappings define which roles are assigned to each user. +// Each mapping has rules that identify users and a list of roles that are +// granted to those users. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. The create or update role mappings API +// cannot update role mappings that are defined in role mapping files. +// +// NOTE: This API does not create roles. Rather, it maps users to existing +// roles. 
+// Roles can be created by using the create or update roles API or roles files. +// +// **Role templates** +// +// The most common use for role mappings is to create a mapping from a known +// value on the user to a fixed role name. +// For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should +// be given the superuser role in Elasticsearch. +// The `roles` field is used for this purpose. +// +// For more complex needs, it is possible to use Mustache templates to +// dynamically determine the names of the roles that should be granted to the +// user. +// The `role_templates` field is used for this purpose. +// +// NOTE: To use role templates successfully, the relevant scripting feature must +// be enabled. +// Otherwise, all attempts to create a role mapping with role templates fail. +// +// All of the user fields that are available in the role mapping rules are also +// available in the role templates. +// Thus it is possible to assign a user to a role that reflects their username, +// their groups, or the name of the realm to which they authenticated. +// +// By default a template is evaluated to produce a single string that is the +// name of the role which should be assigned to the user. +// If the format of the template is set to "json" then the template is expected +// to produce a JSON string or an array of JSON strings for the role names. package putrolemapping import ( @@ -82,7 +120,45 @@ func NewPutRoleMappingFunc(tp elastictransport.Interface) NewPutRoleMapping { } } -// Creates and updates role mappings. +// Create or update role mappings. +// +// Role mappings define which roles are assigned to each user. +// Each mapping has rules that identify users and a list of roles that are +// granted to those users. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. 
The create or update role mappings API +// cannot update role mappings that are defined in role mapping files. +// +// NOTE: This API does not create roles. Rather, it maps users to existing +// roles. +// Roles can be created by using the create or update roles API or roles files. +// +// **Role templates** +// +// The most common use for role mappings is to create a mapping from a known +// value on the user to a fixed role name. +// For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should +// be given the superuser role in Elasticsearch. +// The `roles` field is used for this purpose. +// +// For more complex needs, it is possible to use Mustache templates to +// dynamically determine the names of the roles that should be granted to the +// user. +// The `role_templates` field is used for this purpose. +// +// NOTE: To use role templates successfully, the relevant scripting feature must +// be enabled. +// Otherwise, all attempts to create a role mapping with role templates fail. +// +// All of the user fields that are available in the role mapping rules are also +// available in the role templates. +// Thus it is possible to assign a user to a role that reflects their username, +// their groups, or the name of the realm to which they authenticated. +// +// By default a template is evaluated to produce a single string that is the +// name of the role which should be assigned to the user. +// If the format of the template is set to "json" then the template is expected +// to produce a JSON string or an array of JSON strings for the role names. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html func New(tp elastictransport.Interface) *PutRoleMapping { @@ -92,8 +168,6 @@ func New(tp elastictransport.Interface) *PutRoleMapping { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -307,7 +381,9 @@ func (r *PutRoleMapping) Header(key, value string) *PutRoleMapping { return r } -// Name Role-mapping name +// Name The distinct name that identifies the role mapping. +// The name is used solely as an identifier to facilitate interaction via the +// API; it does not affect the behavior of the mapping in any way. // API Name: name func (r *PutRoleMapping) _name(name string) *PutRoleMapping { r.paramSet |= nameMask @@ -370,45 +446,93 @@ func (r *PutRoleMapping) Pretty(pretty bool) *PutRoleMapping { return r } +// Mappings that have `enabled` set to `false` are ignored when role mapping is +// performed. // API name: enabled func (r *PutRoleMapping) Enabled(enabled bool) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Enabled = &enabled return r } +// Additional metadata that helps define which roles are assigned to each user. +// Within the metadata object, keys beginning with `_` are reserved for system +// usage. // API name: metadata -func (r *PutRoleMapping) Metadata(metadata types.Metadata) *PutRoleMapping { - r.req.Metadata = metadata +func (r *PutRoleMapping) Metadata(metadata types.MetadataVariant) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } +// A list of Mustache templates that will be evaluated to determine the role +// names that should be granted to the users that match the role mapping rules. 
+// Exactly one of `roles` or `role_templates` must be specified. // API name: role_templates -func (r *PutRoleMapping) RoleTemplates(roletemplates ...types.RoleTemplate) *PutRoleMapping { - r.req.RoleTemplates = roletemplates +func (r *PutRoleMapping) RoleTemplates(roletemplates ...types.RoleTemplateVariant) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range roletemplates { + r.req.RoleTemplates = append(r.req.RoleTemplates, *v.RoleTemplateCaster()) + + } return r } +// A list of role names that are granted to the users that match the role +// mapping rules. +// Exactly one of `roles` or `role_templates` must be specified. // API name: roles func (r *PutRoleMapping) Roles(roles ...string) *PutRoleMapping { - r.req.Roles = roles + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range roles { + + r.req.Roles = append(r.req.Roles, v) + } return r } +// The rules that determine which users should be matched by the mapping. +// A rule is a logical condition that is expressed by using a JSON DSL. 
// API name: rules -func (r *PutRoleMapping) Rules(rules *types.RoleMappingRule) *PutRoleMapping { +func (r *PutRoleMapping) Rules(rules types.RoleMappingRuleVariant) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Rules = rules + r.req.Rules = rules.RoleMappingRuleCaster() return r } // API name: run_as func (r *PutRoleMapping) RunAs(runas ...string) *PutRoleMapping { - r.req.RunAs = runas + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range runas { + + r.req.RunAs = append(r.req.RunAs, v) + } return r } diff --git a/typedapi/security/putrolemapping/request.go b/typedapi/security/putrolemapping/request.go index 6f3e21482e..e8e3019438 100644 --- a/typedapi/security/putrolemapping/request.go +++ b/typedapi/security/putrolemapping/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putrolemapping @@ -33,14 +33,28 @@ import ( // Request holds the request body struct for the package putrolemapping // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/put_role_mapping/SecurityPutRoleMappingRequest.ts#L25-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/put_role_mapping/SecurityPutRoleMappingRequest.ts#L25-L103 type Request struct { - Enabled *bool `json:"enabled,omitempty"` - Metadata types.Metadata `json:"metadata,omitempty"` - RoleTemplates []types.RoleTemplate `json:"role_templates,omitempty"` - Roles []string `json:"roles,omitempty"` - Rules *types.RoleMappingRule `json:"rules,omitempty"` - RunAs []string `json:"run_as,omitempty"` + + // Enabled Mappings that have `enabled` set to `false` are ignored when role mapping is + // performed. + Enabled *bool `json:"enabled,omitempty"` + // Metadata Additional metadata that helps define which roles are assigned to each user. + // Within the metadata object, keys beginning with `_` are reserved for system + // usage. + Metadata types.Metadata `json:"metadata,omitempty"` + // RoleTemplates A list of Mustache templates that will be evaluated to determine the role + // names that should be granted to the users that match the role mapping rules. + // Exactly one of `roles` or `role_templates` must be specified. + RoleTemplates []types.RoleTemplate `json:"role_templates,omitempty"` + // Roles A list of role names that are granted to the users that match the role + // mapping rules. + // Exactly one of `roles` or `role_templates` must be specified. 
+ Roles []string `json:"roles,omitempty"` + // Rules The rules that determine which users should be matched by the mapping. + // A rule is a logical condition that is expressed by using a JSON DSL. + Rules *types.RoleMappingRule `json:"rules,omitempty"` + RunAs []string `json:"run_as,omitempty"` } // NewRequest returns a Request diff --git a/typedapi/security/putrolemapping/response.go b/typedapi/security/putrolemapping/response.go index 247ffaaa7c..381608e39a 100644 --- a/typedapi/security/putrolemapping/response.go +++ b/typedapi/security/putrolemapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putrolemapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putrolemapping // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/put_role_mapping/SecurityPutRoleMappingResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/put_role_mapping/SecurityPutRoleMappingResponse.ts#L22-L24 type Response struct { Created *bool `json:"created,omitempty"` RoleMapping types.CreatedStatus `json:"role_mapping"` diff --git a/typedapi/security/putuser/put_user.go b/typedapi/security/putuser/put_user.go index 71b70590a9..f1b34370e0 100644 --- a/typedapi/security/putuser/put_user.go +++ b/typedapi/security/putuser/put_user.go @@ -16,10 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Adds and updates users in the native realm. These users are commonly referred -// to as native users. +// Create or update users. +// +// Add and update users in the native realm. +// A password is required for adding a new user but is optional when updating an +// existing user. +// To change a user's password without updating any other fields, use the change +// password API. package putuser import ( @@ -83,8 +88,13 @@ func NewPutUserFunc(tp elastictransport.Interface) NewPutUser { } } -// Adds and updates users in the native realm. These users are commonly referred -// to as native users. +// Create or update users. +// +// Add and update users in the native realm. +// A password is required for adding a new user but is optional when updating an +// existing user. +// To change a user's password without updating any other fields, use the change +// password API. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html func New(tp elastictransport.Interface) *PutUser { @@ -94,8 +104,6 @@ func New(tp elastictransport.Interface) *PutUser { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -309,7 +317,12 @@ func (r *PutUser) Header(key, value string) *PutUser { return r } -// Username The username of the User +// Username An identifier for the user. +// +// NOTE: Usernames must be at least 1 and no more than 507 characters. +// They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, +// punctuation, and printable symbols in the Basic Latin (ASCII) block. +// Leading or trailing whitespace is not allowed. 
// API Name: username func (r *PutUser) _username(username string) *PutUser { r.paramSet |= usernameMask @@ -318,9 +331,9 @@ func (r *PutUser) _username(username string) *PutUser { return r } -// Refresh If `true` (the default) then refresh the affected shards to make this -// operation visible to search, if `wait_for` then wait for a refresh to make -// this operation visible to search, if `false` then do nothing with refreshes. +// Refresh Valid values are `true`, `false`, and `wait_for`. +// These values have the same meaning as in the index API, but the default value +// for this API is true. // API name: refresh func (r *PutUser) Refresh(refresh refresh.Refresh) *PutUser { r.values.Set("refresh", refresh.String()) @@ -372,52 +385,111 @@ func (r *PutUser) Pretty(pretty bool) *PutUser { return r } +// The email of the user. // API name: email func (r *PutUser) Email(email string) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Email = &email return r } +// Specifies whether the user is enabled. // API name: enabled func (r *PutUser) Enabled(enabled bool) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Enabled = &enabled return r } +// The full name of the user. // API name: full_name func (r *PutUser) FullName(fullname string) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FullName = &fullname return r } +// Arbitrary metadata that you want to associate with the user. 
// API name: metadata -func (r *PutUser) Metadata(metadata types.Metadata) *PutUser { - r.req.Metadata = metadata +func (r *PutUser) Metadata(metadata types.MetadataVariant) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } +// The user's password. +// Passwords must be at least 6 characters long. +// When adding a user, one of `password` or `password_hash` is required. +// When updating an existing user, the password is optional, so that other +// fields on the user (such as their roles) may be updated without modifying the +// user's password // API name: password func (r *PutUser) Password(password string) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Password = &password return r } +// A hash of the user's password. +// This must be produced using the same hashing algorithm as has been configured +// for password storage. +// For more details, see the explanation of the +// `xpack.security.authc.password_hashing.algorithm` setting in the user cache +// and password hash algorithm documentation. +// Using this parameter allows the client to pre-hash the password for +// performance and/or confidentiality reasons. +// The `password` parameter and the `password_hash` parameter cannot be used in +// the same request. // API name: password_hash func (r *PutUser) PasswordHash(passwordhash string) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.PasswordHash = &passwordhash return r } +// A set of roles the user has. +// The roles determine the user's access permissions. +// To create a user without any roles, specify an empty list (`[]`). 
// API name: roles func (r *PutUser) Roles(roles ...string) *PutUser { - r.req.Roles = roles + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range roles { + r.req.Roles = append(r.req.Roles, v) + + } return r } diff --git a/typedapi/security/putuser/request.go b/typedapi/security/putuser/request.go index 9b7f4e721f..0e982a678e 100644 --- a/typedapi/security/putuser/request.go +++ b/typedapi/security/putuser/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putuser @@ -33,16 +33,40 @@ import ( // Request holds the request body struct for the package putuser // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/put_user/SecurityPutUserRequest.ts#L23-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/put_user/SecurityPutUserRequest.ts#L23-L101 type Request struct { - Email *string `json:"email,omitempty"` - Enabled *bool `json:"enabled,omitempty"` - FullName *string `json:"full_name,omitempty"` - Metadata types.Metadata `json:"metadata,omitempty"` - Password *string `json:"password,omitempty"` - PasswordHash *string `json:"password_hash,omitempty"` - Roles []string `json:"roles,omitempty"` - Username *string `json:"username,omitempty"` + + // Email The email of the user. + Email *string `json:"email,omitempty"` + // Enabled Specifies whether the user is enabled. + Enabled *bool `json:"enabled,omitempty"` + // FullName The full name of the user. 
+ FullName *string `json:"full_name,omitempty"` + // Metadata Arbitrary metadata that you want to associate with the user. + Metadata types.Metadata `json:"metadata,omitempty"` + // Password The user's password. + // Passwords must be at least 6 characters long. + // When adding a user, one of `password` or `password_hash` is required. + // When updating an existing user, the password is optional, so that other + // fields on the user (such as their roles) may be updated without modifying the + // user's password + Password *string `json:"password,omitempty"` + // PasswordHash A hash of the user's password. + // This must be produced using the same hashing algorithm as has been configured + // for password storage. + // For more details, see the explanation of the + // `xpack.security.authc.password_hashing.algorithm` setting in the user cache + // and password hash algorithm documentation. + // Using this parameter allows the client to pre-hash the password for + // performance and/or confidentiality reasons. + // The `password` parameter and the `password_hash` parameter cannot be used in + // the same request. + PasswordHash *string `json:"password_hash,omitempty"` + // Roles A set of roles the user has. + // The roles determine the user's access permissions. + // To create a user without any roles, specify an empty list (`[]`). + Roles []string `json:"roles,omitempty"` + Username *string `json:"username,omitempty"` } // NewRequest returns a Request diff --git a/typedapi/security/putuser/response.go b/typedapi/security/putuser/response.go index fa3d8ef5a8..302a37e5fe 100644 --- a/typedapi/security/putuser/response.go +++ b/typedapi/security/putuser/response.go @@ -16,14 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putuser // Response holds the response body struct for the package putuser // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/put_user/SecurityPutUserResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/put_user/SecurityPutUserResponse.ts#L20-L28 type Response struct { + + // Created A successful call returns a JSON structure that shows whether the user has + // been created or updated. + // When an existing user is updated, `created` is set to `false`. Created bool `json:"created"` } diff --git a/typedapi/security/queryapikeys/query_api_keys.go b/typedapi/security/queryapikeys/query_api_keys.go index ccc082e5b2..2855676bff 100644 --- a/typedapi/security/queryapikeys/query_api_keys.go +++ b/typedapi/security/queryapikeys/query_api_keys.go @@ -16,11 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Query API keys. -// Retrieves a paginated list of API keys and their information. You can -// optionally filter the results with a query. +// Find API keys with a query. +// +// Get a paginated list of API keys and their information. +// You can optionally filter the results with a query. +// +// To use this API, you must have at least the `manage_own_api_key` or the +// `read_security` cluster privileges. +// If you have only the `manage_own_api_key` privilege, this API returns only +// the API keys that you own. 
+// If you have the `read_security`, `manage_api_key`, or greater privileges +// (including `manage_security`), this API returns all API keys regardless of +// ownership. package queryapikeys import ( @@ -75,9 +84,18 @@ func NewQueryApiKeysFunc(tp elastictransport.Interface) NewQueryApiKeys { } } -// Query API keys. -// Retrieves a paginated list of API keys and their information. You can -// optionally filter the results with a query. +// Find API keys with a query. +// +// Get a paginated list of API keys and their information. +// You can optionally filter the results with a query. +// +// To use this API, you must have at least the `manage_own_api_key` or the +// `read_security` cluster privileges. +// If you have only the `manage_own_api_key` privilege, this API returns only +// the API keys that you own. +// If you have the `read_security`, `manage_api_key`, or greater privileges +// (including `manage_security`), this API returns all API keys regardless of +// ownership. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-api-key.html func New(tp elastictransport.Interface) *QueryApiKeys { @@ -87,8 +105,6 @@ func New(tp elastictransport.Interface) *QueryApiKeys { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -303,7 +319,10 @@ func (r *QueryApiKeys) Header(key, value string) *QueryApiKeys { // WithLimitedBy Return the snapshot of the owner user's role descriptors associated with the // API key. // An API key's actual permission is the intersection of its assigned role -// descriptors and the owner user's role descriptors. +// descriptors and the owner user's role descriptors (effectively limited by +// it). +// An API key cannot retrieve any API key’s limited-by role descriptors +// (including itself) unless it has `manage_api_key` or higher privileges. 
// API name: with_limited_by func (r *QueryApiKeys) WithLimitedBy(withlimitedby bool) *QueryApiKeys { r.values.Set("with_limited_by", strconv.FormatBool(withlimitedby)) @@ -311,8 +330,10 @@ func (r *QueryApiKeys) WithLimitedBy(withlimitedby bool) *QueryApiKeys { return r } -// WithProfileUid Determines whether to also retrieve the profile uid, for the API key owner -// principal, if it exists. +// WithProfileUid Determines whether to also retrieve the profile UID for the API key owner +// principal. +// If it exists, the profile UID is returned under the `profile_uid` response +// field for each API key. // API name: with_profile_uid func (r *QueryApiKeys) WithProfileUid(withprofileuid bool) *QueryApiKeys { r.values.Set("with_profile_uid", strconv.FormatBool(withprofileuid)) @@ -373,7 +394,7 @@ func (r *QueryApiKeys) Pretty(pretty bool) *QueryApiKeys { return r } -// Aggregations Any aggregations to run over the corpus of returned API keys. +// Any aggregations to run over the corpus of returned API keys. // Aggregations and queries work together. Aggregations are computed only on the // API keys that match the query. // This supports only a subset of aggregation types, namely: `terms`, `range`, @@ -383,24 +404,51 @@ func (r *QueryApiKeys) Pretty(pretty bool) *QueryApiKeys { // works with. 
// API name: aggregations func (r *QueryApiKeys) Aggregations(aggregations map[string]types.ApiKeyAggregationContainer) *QueryApiKeys { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Aggregations = aggregations + return r +} + +func (r *QueryApiKeys) AddAggregation(key string, value types.ApiKeyAggregationContainerVariant) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ApiKeyAggregationContainer + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.ApiKeyAggregationContainer) + } else { + tmp = r.req.Aggregations + } + + tmp[key] = *value.ApiKeyAggregationContainerCaster() + r.req.Aggregations = tmp return r } -// From Starting document offset. -// By default, you cannot page through more than 10,000 hits using the from and -// size parameters. +// The starting document offset. +// It must not be negative. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. // To page through more hits, use the `search_after` parameter. // API name: from func (r *QueryApiKeys) From(from int) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } -// Query A query to filter which API keys to return. +// A query to filter which API keys to return. // If the query parameter is missing, it is equivalent to a `match_all` query. // The query supports a subset of query types, including `match_all`, `bool`, // `term`, `terms`, `match`, @@ -409,39 +457,73 @@ func (r *QueryApiKeys) From(from int) *QueryApiKeys { // `id`, `type`, `name`, // `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, // and `metadata`. +// +// NOTE: The queryable string values associated with API keys are internally +// mapped as keywords. 
+// Consequently, if no `analyzer` parameter is specified for a `match` query, +// then the provided match query string is interpreted as a single keyword +// value. +// Such a match query is hence equivalent to a `term` query. // API name: query -func (r *QueryApiKeys) Query(query *types.ApiKeyQueryContainer) *QueryApiKeys { +func (r *QueryApiKeys) Query(query types.ApiKeyQueryContainerVariant) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.ApiKeyQueryContainerCaster() return r } -// SearchAfter Search after definition +// The search after definition. // API name: search_after -func (r *QueryApiKeys) SearchAfter(sortresults ...types.FieldValue) *QueryApiKeys { - r.req.SearchAfter = sortresults +func (r *QueryApiKeys) SearchAfter(sortresults ...types.FieldValueVariant) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// Size The number of hits to return. +// The number of hits to return. +// It must not be negative. +// The `size` parameter can be set to `0`, in which case no API key matches are +// returned, only the aggregation results. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. // API name: size func (r *QueryApiKeys) Size(size int) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// Sort Other than `id`, all public fields of an API key are eligible for sorting. +// The sort definition. +// Other than `id`, all public fields of an API key are eligible for sorting. 
// In addition, sort can also be applied to the `_doc` field to sort by index // order. // API name: sort -func (r *QueryApiKeys) Sort(sorts ...types.SortCombinations) *QueryApiKeys { - r.req.Sort = sorts +func (r *QueryApiKeys) Sort(sorts ...types.SortCombinationsVariant) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } diff --git a/typedapi/security/queryapikeys/request.go b/typedapi/security/queryapikeys/request.go index dcdfd79bb6..8c2ba0baff 100644 --- a/typedapi/security/queryapikeys/request.go +++ b/typedapi/security/queryapikeys/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package queryapikeys @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package queryapikeys // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_api_keys/QueryApiKeysRequest.ts#L26-L100 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_api_keys/QueryApiKeysRequest.ts#L26-L124 type Request struct { // Aggregations Any aggregations to run over the corpus of returned API keys. @@ -45,9 +45,10 @@ type Request struct { // Additionally, aggregations only run over the same subset of fields that query // works with. Aggregations map[string]types.ApiKeyAggregationContainer `json:"aggregations,omitempty"` - // From Starting document offset. - // By default, you cannot page through more than 10,000 hits using the from and - // size parameters. 
+ // From The starting document offset. + // It must not be negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. // To page through more hits, use the `search_after` parameter. From *int `json:"from,omitempty"` // Query A query to filter which API keys to return. @@ -59,15 +60,26 @@ type Request struct { // `id`, `type`, `name`, // `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, // and `metadata`. + // + // NOTE: The queryable string values associated with API keys are internally + // mapped as keywords. + // Consequently, if no `analyzer` parameter is specified for a `match` query, + // then the provided match query string is interpreted as a single keyword + // value. + // Such a match query is hence equivalent to a `term` query. Query *types.ApiKeyQueryContainer `json:"query,omitempty"` - // SearchAfter Search after definition + // SearchAfter The search after definition. SearchAfter []types.FieldValue `json:"search_after,omitempty"` // Size The number of hits to return. + // It must not be negative. + // The `size` parameter can be set to `0`, in which case no API key matches are + // returned, only the aggregation results. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. Size *int `json:"size,omitempty"` - // Sort Other than `id`, all public fields of an API key are eligible for sorting. + // Sort The sort definition. + // Other than `id`, all public fields of an API key are eligible for sorting. // In addition, sort can also be applied to the `_doc` field to sort by index // order. 
Sort []types.SortCombinations `json:"sort,omitempty"` diff --git a/typedapi/security/queryapikeys/response.go b/typedapi/security/queryapikeys/response.go index 8645b88bc4..8468864698 100644 --- a/typedapi/security/queryapikeys/response.go +++ b/typedapi/security/queryapikeys/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package queryapikeys @@ -34,7 +34,7 @@ import ( // Response holds the response body struct for the package queryapikeys // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_api_keys/QueryApiKeysResponse.ts#L26-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_api_keys/QueryApiKeysResponse.ts#L26-L45 type Response struct { // Aggregations The aggregations result, if requested. diff --git a/typedapi/security/queryrole/query_role.go b/typedapi/security/queryrole/query_role.go index 89ca2dd1b9..3d08812b43 100644 --- a/typedapi/security/queryrole/query_role.go +++ b/typedapi/security/queryrole/query_role.go @@ -16,10 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves roles in a paginated manner. You can optionally filter the results -// with a query. +// Find roles with a query. +// +// Get roles in a paginated manner. 
+// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The query roles API does not retrieve roles that are defined in roles files, +// nor built-in ones. +// You can optionally filter the results with a query. +// Also, the results can be paginated and sorted. package queryrole import ( @@ -74,8 +81,15 @@ func NewQueryRoleFunc(tp elastictransport.Interface) NewQueryRole { } } -// Retrieves roles in a paginated manner. You can optionally filter the results -// with a query. +// Find roles with a query. +// +// Get roles in a paginated manner. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The query roles API does not retrieve roles that are defined in roles files, +// nor built-in ones. +// You can optionally filter the results with a query. +// Also, the results can be paginated and sorted. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-role.html func New(tp elastictransport.Interface) *QueryRole { @@ -85,8 +99,6 @@ func New(tp elastictransport.Interface) *QueryRole { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -340,59 +352,90 @@ func (r *QueryRole) Pretty(pretty bool) *QueryRole { return r } -// From Starting document offset. -// By default, you cannot page through more than 10,000 hits using the from and -// size parameters. +// The starting document offset. +// It must not be negative. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. // To page through more hits, use the `search_after` parameter. 
// API name: from func (r *QueryRole) From(from int) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } -// Query A query to filter which roles to return. +// A query to filter which roles to return. // If the query parameter is missing, it is equivalent to a `match_all` query. // The query supports a subset of query types, including `match_all`, `bool`, // `term`, `terms`, `match`, // `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. // You can query the following information associated with roles: `name`, // `description`, `metadata`, -// `applications.application`, `applications.privileges`, +// `applications.application`, `applications.privileges`, and // `applications.resources`. // API name: query -func (r *QueryRole) Query(query *types.RoleQueryContainer) *QueryRole { +func (r *QueryRole) Query(query types.RoleQueryContainerVariant) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.RoleQueryContainerCaster() return r } -// SearchAfter Search after definition +// The search after definition. // API name: search_after -func (r *QueryRole) SearchAfter(sortresults ...types.FieldValue) *QueryRole { - r.req.SearchAfter = sortresults +func (r *QueryRole) SearchAfter(sortresults ...types.FieldValueVariant) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// Size The number of hits to return. +// The number of hits to return. +// It must not be negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. 
// API name: size func (r *QueryRole) Size(size int) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// Sort All public fields of a role are eligible for sorting. +// The sort definition. +// You can sort on `username`, `roles`, or `enabled`. // In addition, sort can also be applied to the `_doc` field to sort by index // order. // API name: sort -func (r *QueryRole) Sort(sorts ...types.SortCombinations) *QueryRole { - r.req.Sort = sorts +func (r *QueryRole) Sort(sorts ...types.SortCombinationsVariant) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } diff --git a/typedapi/security/queryrole/request.go b/typedapi/security/queryrole/request.go index b37a22d9ef..616f93d3bd 100644 --- a/typedapi/security/queryrole/request.go +++ b/typedapi/security/queryrole/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package queryrole @@ -33,12 +33,13 @@ import ( // Request holds the request body struct for the package queryrole // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_role/QueryRolesRequest.ts#L25-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_role/QueryRolesRequest.ts#L25-L85 type Request struct { - // From Starting document offset. - // By default, you cannot page through more than 10,000 hits using the from and - // size parameters. 
+ // From The starting document offset. + // It must not be negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. // To page through more hits, use the `search_after` parameter. From *int `json:"from,omitempty"` // Query A query to filter which roles to return. @@ -48,17 +49,19 @@ type Request struct { // `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. // You can query the following information associated with roles: `name`, // `description`, `metadata`, - // `applications.application`, `applications.privileges`, + // `applications.application`, `applications.privileges`, and // `applications.resources`. Query *types.RoleQueryContainer `json:"query,omitempty"` - // SearchAfter Search after definition + // SearchAfter The search after definition. SearchAfter []types.FieldValue `json:"search_after,omitempty"` // Size The number of hits to return. + // It must not be negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. Size *int `json:"size,omitempty"` - // Sort All public fields of a role are eligible for sorting. + // Sort The sort definition. + // You can sort on `username`, `roles`, or `enabled`. // In addition, sort can also be applied to the `_doc` field to sort by index // order. Sort []types.SortCombinations `json:"sort,omitempty"` diff --git a/typedapi/security/queryrole/response.go b/typedapi/security/queryrole/response.go index d4f7bae0e1..e8edd44644 100644 --- a/typedapi/security/queryrole/response.go +++ b/typedapi/security/queryrole/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package queryrole @@ -26,12 +26,19 @@ import ( // Response holds the response body struct for the package queryrole // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_role/QueryRolesResponse.ts#L23-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_role/QueryRolesResponse.ts#L23-L43 type Response struct { // Count The number of roles returned in the response. Count int `json:"count"` - // Roles The list of roles. + // Roles A list of roles that match the query. + // The returned role format is an extension of the role definition format. + // It adds the `transient_metadata.enabled` and the `_sort` fields. + // `transient_metadata.enabled` is set to `false` in case the role is + // automatically disabled, for example when the role grants privileges that are + // not allowed by the installed license. + // `_sort` is present when the search query sorts on some field. + // It contains the array of values that have been used for sorting. Roles []types.QueryRole `json:"roles"` // Total The total number of roles found. Total int `json:"total"` diff --git a/typedapi/security/queryuser/query_user.go b/typedapi/security/queryuser/query_user.go index 7eef72b985..fdc4724aac 100644 --- a/typedapi/security/queryuser/query_user.go +++ b/typedapi/security/queryuser/query_user.go @@ -16,10 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves information for Users in a paginated manner. You can optionally -// filter the results with a query. +// Find users with a query. +// +// Get information for users in a paginated manner. +// You can optionally filter the results with a query. +// +// NOTE: As opposed to the get user API, built-in users are excluded from the +// result. +// This API is only for native users. package queryuser import ( @@ -74,8 +80,14 @@ func NewQueryUserFunc(tp elastictransport.Interface) NewQueryUser { } } -// Retrieves information for Users in a paginated manner. You can optionally -// filter the results with a query. +// Find users with a query. +// +// Get information for users in a paginated manner. +// You can optionally filter the results with a query. +// +// NOTE: As opposed to the get user API, built-in users are excluded from the +// result. +// This API is only for native users. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-user.html func New(tp elastictransport.Interface) *QueryUser { @@ -85,8 +97,6 @@ func New(tp elastictransport.Interface) *QueryUser { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -296,8 +306,8 @@ func (r *QueryUser) Header(key, value string) *QueryUser { return r } -// WithProfileUid If true will return the User Profile ID for the users in the query result, if -// any. +// WithProfileUid Determines whether to retrieve the user profile UID, if it exists, for the +// users. 
// API name: with_profile_uid func (r *QueryUser) WithProfileUid(withprofileuid bool) *QueryUser { r.values.Set("with_profile_uid", strconv.FormatBool(withprofileuid)) @@ -349,57 +359,88 @@ func (r *QueryUser) Pretty(pretty bool) *QueryUser { return r } -// From Starting document offset. -// By default, you cannot page through more than 10,000 hits using the from and -// size parameters. +// The starting document offset. +// It must not be negative. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. // To page through more hits, use the `search_after` parameter. // API name: from func (r *QueryUser) From(from int) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } -// Query A query to filter which users to return. +// A query to filter which users to return. // If the query parameter is missing, it is equivalent to a `match_all` query. // The query supports a subset of query types, including `match_all`, `bool`, // `term`, `terms`, `match`, // `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. // You can query the following information associated with user: `username`, -// `roles`, `enabled` +// `roles`, `enabled`, `full_name`, and `email`. 
// API name: query -func (r *QueryUser) Query(query *types.UserQueryContainer) *QueryUser { +func (r *QueryUser) Query(query types.UserQueryContainerVariant) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.UserQueryContainerCaster() return r } -// SearchAfter Search after definition +// The search after definition // API name: search_after -func (r *QueryUser) SearchAfter(sortresults ...types.FieldValue) *QueryUser { - r.req.SearchAfter = sortresults +func (r *QueryUser) SearchAfter(sortresults ...types.FieldValueVariant) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// Size The number of hits to return. +// The number of hits to return. +// It must not be negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. // API name: size func (r *QueryUser) Size(size int) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// Sort Fields eligible for sorting are: username, roles, enabled +// The sort definition. +// Fields eligible for sorting are: `username`, `roles`, `enabled`. // In addition, sort can also be applied to the `_doc` field to sort by index // order. 
// API name: sort -func (r *QueryUser) Sort(sorts ...types.SortCombinations) *QueryUser { - r.req.Sort = sorts +func (r *QueryUser) Sort(sorts ...types.SortCombinationsVariant) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } diff --git a/typedapi/security/queryuser/request.go b/typedapi/security/queryuser/request.go index ccf64681a0..eb672df5aa 100644 --- a/typedapi/security/queryuser/request.go +++ b/typedapi/security/queryuser/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package queryuser @@ -33,12 +33,13 @@ import ( // Request holds the request body struct for the package queryuser // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_user/SecurityQueryUserRequest.ts#L25-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_user/SecurityQueryUserRequest.ts#L25-L91 type Request struct { - // From Starting document offset. - // By default, you cannot page through more than 10,000 hits using the from and - // size parameters. + // From The starting document offset. + // It must not be negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. // To page through more hits, use the `search_after` parameter. From *int `json:"from,omitempty"` // Query A query to filter which users to return. 
@@ -47,16 +48,18 @@ type Request struct { // `term`, `terms`, `match`, // `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. // You can query the following information associated with user: `username`, - // `roles`, `enabled` + // `roles`, `enabled`, `full_name`, and `email`. Query *types.UserQueryContainer `json:"query,omitempty"` - // SearchAfter Search after definition + // SearchAfter The search after definition SearchAfter []types.FieldValue `json:"search_after,omitempty"` // Size The number of hits to return. + // It must not be negative. // By default, you cannot page through more than 10,000 hits using the `from` // and `size` parameters. // To page through more hits, use the `search_after` parameter. Size *int `json:"size,omitempty"` - // Sort Fields eligible for sorting are: username, roles, enabled + // Sort The sort definition. + // Fields eligible for sorting are: `username`, `roles`, `enabled`. // In addition, sort can also be applied to the `_doc` field to sort by index // order. Sort []types.SortCombinations `json:"sort,omitempty"` diff --git a/typedapi/security/queryuser/response.go b/typedapi/security/queryuser/response.go index 3e63a0d7f3..1d59d5fad7 100644 --- a/typedapi/security/queryuser/response.go +++ b/typedapi/security/queryuser/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package queryuser @@ -26,14 +26,14 @@ import ( // Response holds the response body struct for the package queryuser // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_user/SecurityQueryUserResponse.ts#L23-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_user/SecurityQueryUserResponse.ts#L23-L38 type Response struct { // Count The number of users returned in the response. Count int `json:"count"` // Total The total number of users found. Total int `json:"total"` - // Users A list of user information. + // Users A list of users that match the query. Users []types.QueryUser `json:"users"` } diff --git a/typedapi/security/samlauthenticate/request.go b/typedapi/security/samlauthenticate/request.go index 53af7b2dc7..0c3c65bc9f 100644 --- a/typedapi/security/samlauthenticate/request.go +++ b/typedapi/security/samlauthenticate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package samlauthenticate @@ -31,13 +31,13 @@ import ( // Request holds the request body struct for the package samlauthenticate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/saml_authenticate/Request.ts#L23-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/saml_authenticate/Request.ts#L23-L61 type Request struct { - // Content The SAML response as it was sent by the user’s browser, usually a Base64 + // Content The SAML response as it was sent by the user's browser, usually a Base64 // encoded XML document. Content string `json:"content"` - // Ids A json array with all the valid SAML Request Ids that the caller of the API + // Ids A JSON array with all the valid SAML Request Ids that the caller of the API // has for the current user. Ids []string `json:"ids"` // Realm The name of the realm that should authenticate the SAML response. Useful in diff --git a/typedapi/security/samlauthenticate/response.go b/typedapi/security/samlauthenticate/response.go index 2b09b25224..0084725ea6 100644 --- a/typedapi/security/samlauthenticate/response.go +++ b/typedapi/security/samlauthenticate/response.go @@ -16,19 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package samlauthenticate // Response holds the response body struct for the package samlauthenticate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/saml_authenticate/Response.ts#L22-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/saml_authenticate/Response.ts#L22-L45 type Response struct { - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - Realm string `json:"realm"` + + // AccessToken The access token that was generated by Elasticsearch. + AccessToken string `json:"access_token"` + // ExpiresIn The amount of time (in seconds) left until the token expires. + ExpiresIn int `json:"expires_in"` + // Realm The name of the realm where the user was authenticated. + Realm string `json:"realm"` + // RefreshToken The refresh token that was generated by Elasticsearch. RefreshToken string `json:"refresh_token"` - Username string `json:"username"` + // Username The authenticated user's name. + Username string `json:"username"` } // NewResponse returns a Response diff --git a/typedapi/security/samlauthenticate/saml_authenticate.go b/typedapi/security/samlauthenticate/saml_authenticate.go index 39cd77eed9..7f727aa36d 100644 --- a/typedapi/security/samlauthenticate/saml_authenticate.go +++ b/typedapi/security/samlauthenticate/saml_authenticate.go @@ -16,9 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d
 
-// Submits a SAML Response message to Elasticsearch for consumption.
+// Authenticate SAML.
+//
+// Submit a SAML response message to Elasticsearch for consumption.
+//
+// NOTE: This API is intended for use by custom web applications other than
+// Kibana.
+// If you are using Kibana, refer to the documentation for configuring SAML
+// single-sign-on on the Elastic Stack.
+//
+// The SAML message that is submitted can be:
+//
+// * A response to a SAML authentication request that was previously created
+// using the SAML prepare authentication API.
+// * An unsolicited SAML message in the case of an IdP-initiated single sign-on
+// (SSO) flow.
+//
+// In either case, the SAML message needs to be a base64 encoded XML document
+// with a root element of `<Response>`.
+//
+// After successful validation, Elasticsearch responds with an Elasticsearch
+// internal access token and refresh token that can be subsequently used for
+// authentication.
+// This API endpoint essentially exchanges SAML responses that indicate
+// successful authentication in the IdP for Elasticsearch access and refresh
+// tokens, which can be used for authentication against Elasticsearch.
 package samlauthenticate
 
 import (
@@ -73,7 +97,31 @@ func NewSamlAuthenticateFunc(tp elastictransport.Interface) NewSamlAuthenticate
 	}
 }
 
-// Submits a SAML Response message to Elasticsearch for consumption.
+// Authenticate SAML.
+//
+// Submit a SAML response message to Elasticsearch for consumption.
+//
+// NOTE: This API is intended for use by custom web applications other than
+// Kibana.
+// If you are using Kibana, refer to the documentation for configuring SAML
+// single-sign-on on the Elastic Stack.
+//
+// The SAML message that is submitted can be:
+//
+// * A response to a SAML authentication request that was previously created
+// using the SAML prepare authentication API.
+// * An unsolicited SAML message in the case of an IdP-initiated single sign-on
+// (SSO) flow.
+//
+// In either case, the SAML message needs to be a base64 encoded XML document
+// with a root element of `<Response>`.
+//
+// After successful validation, Elasticsearch responds with an Elasticsearch
+// internal access token and refresh token that can be subsequently used for
+// authentication.
+// This API endpoint essentially exchanges SAML responses that indicate
+// successful authentication in the IdP for Elasticsearch access and refresh
+// tokens, which can be used for authentication against Elasticsearch.
 //
 // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-authenticate.html
 func New(tp elastictransport.Interface) *SamlAuthenticate {
@@ -83,8 +131,6 @@ func New(tp elastictransport.Interface) *SamlAuthenticate {
 		headers:   make(http.Header),
 
 		buf: gobytes.NewBuffer(nil),
-
-		req: NewRequest(),
 	}
 
 	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
@@ -338,29 +384,42 @@ func (r *SamlAuthenticate) Pretty(pretty bool) *SamlAuthenticate {
 	return r
 }
 
-// Content The SAML response as it was sent by the user’s browser, usually a Base64
+// The SAML response as it was sent by the user's browser, usually a Base64
 // encoded XML document.
 // API name: content
 func (r *SamlAuthenticate) Content(content string) *SamlAuthenticate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
 
 	r.req.Content = content
 
 	return r
 }
 
-// Ids A json array with all the valid SAML Request Ids that the caller of the API
+// A JSON array with all the valid SAML Request Ids that the caller of the API
 // has for the current user.
// API name: ids func (r *SamlAuthenticate) Ids(ids ...string) *SamlAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Ids = ids return r } -// Realm The name of the realm that should authenticate the SAML response. Useful in +// The name of the realm that should authenticate the SAML response. Useful in // cases where many SAML realms are defined. // API name: realm func (r *SamlAuthenticate) Realm(realm string) *SamlAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Realm = &realm diff --git a/typedapi/security/samlcompletelogout/request.go b/typedapi/security/samlcompletelogout/request.go index 0bcae22993..fed43edf37 100644 --- a/typedapi/security/samlcompletelogout/request.go +++ b/typedapi/security/samlcompletelogout/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package samlcompletelogout @@ -31,14 +31,14 @@ import ( // Request holds the request body struct for the package samlcompletelogout // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/saml_complete_logout/Request.ts#L23-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/saml_complete_logout/Request.ts#L23-L61 type Request struct { // Content If the SAML IdP sends the logout response with the HTTP-Post binding, this // field must be set to the value of the SAMLResponse form parameter from the // logout response. 
Content *string `json:"content,omitempty"` - // Ids A json array with all the valid SAML Request Ids that the caller of the API + // Ids A JSON array with all the valid SAML Request Ids that the caller of the API // has for the current user. Ids []string `json:"ids"` // QueryString If the SAML IdP sends the logout response with the HTTP-Redirect binding, diff --git a/typedapi/security/samlcompletelogout/saml_complete_logout.go b/typedapi/security/samlcompletelogout/saml_complete_logout.go index 6da7d0fcdb..d29f5c7912 100644 --- a/typedapi/security/samlcompletelogout/saml_complete_logout.go +++ b/typedapi/security/samlcompletelogout/saml_complete_logout.go @@ -16,9 +16,26 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Logout of SAML completely. +// // Verifies the logout response sent from the SAML IdP. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The SAML IdP may send a logout response back to the SP after handling the +// SP-initiated SAML Single Logout. +// This API verifies the response by ensuring the content is relevant and +// validating its signature. +// An empty response is returned if the verification process is successful. +// The response can be sent by the IdP with either the HTTP-Redirect or the +// HTTP-Post binding. +// The caller of this API must prepare the request accordingly so that this API +// can handle either of them. package samlcompletelogout import ( @@ -72,8 +89,25 @@ func NewSamlCompleteLogoutFunc(tp elastictransport.Interface) NewSamlCompleteLog } } +// Logout of SAML completely. 
+// // Verifies the logout response sent from the SAML IdP. // +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The SAML IdP may send a logout response back to the SP after handling the +// SP-initiated SAML Single Logout. +// This API verifies the response by ensuring the content is relevant and +// validating its signature. +// An empty response is returned if the verification process is successful. +// The response can be sent by the IdP with either the HTTP-Redirect or the +// HTTP-Post binding. +// The caller of this API must prepare the request accordingly so that this API +// can handle either of them. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-complete-logout.html func New(tp elastictransport.Interface) *SamlCompleteLogout { r := &SamlCompleteLogout{ @@ -82,8 +116,6 @@ func New(tp elastictransport.Interface) *SamlCompleteLogout { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -283,40 +315,57 @@ func (r *SamlCompleteLogout) Pretty(pretty bool) *SamlCompleteLogout { return r } -// Content If the SAML IdP sends the logout response with the HTTP-Post binding, this +// If the SAML IdP sends the logout response with the HTTP-Post binding, this // field must be set to the value of the SAMLResponse form parameter from the // logout response. 
// API name: content func (r *SamlCompleteLogout) Content(content string) *SamlCompleteLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Content = &content return r } -// Ids A json array with all the valid SAML Request Ids that the caller of the API +// A JSON array with all the valid SAML Request Ids that the caller of the API // has for the current user. // API name: ids func (r *SamlCompleteLogout) Ids(ids ...string) *SamlCompleteLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Ids = ids return r } -// QueryString If the SAML IdP sends the logout response with the HTTP-Redirect binding, +// If the SAML IdP sends the logout response with the HTTP-Redirect binding, // this field must be set to the query string of the redirect URI. // API name: query_string func (r *SamlCompleteLogout) QueryString(querystring string) *SamlCompleteLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.QueryString = &querystring return r } -// Realm The name of the SAML realm in Elasticsearch for which the configuration is +// The name of the SAML realm in Elasticsearch for which the configuration is // used to verify the logout response. // API name: realm func (r *SamlCompleteLogout) Realm(realm string) *SamlCompleteLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Realm = realm diff --git a/typedapi/security/samlinvalidate/request.go b/typedapi/security/samlinvalidate/request.go index 55a82945f5..fca6781183 100644 --- a/typedapi/security/samlinvalidate/request.go +++ b/typedapi/security/samlinvalidate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package samlinvalidate @@ -27,28 +27,28 @@ import ( // Request holds the request body struct for the package samlinvalidate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/saml_invalidate/Request.ts#L22-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/saml_invalidate/Request.ts#L22-L61 type Request struct { // Acs The Assertion Consumer Service URL that matches the one of the SAML realm in // Elasticsearch that should be used. You must specify either this parameter or - // the realm parameter. + // the `realm` parameter. Acs *string `json:"acs,omitempty"` // QueryString The query part of the URL that the user was redirected to by the SAML IdP to // initiate the Single Logout. - // This query should include a single parameter named SAMLRequest that contains - // a SAML logout request that is deflated and Base64 encoded. + // This query should include a single parameter named `SAMLRequest` that + // contains a SAML logout request that is deflated and Base64 encoded. // If the SAML IdP has signed the logout request, the URL should include two - // extra parameters named SigAlg and Signature that contain the algorithm used - // for the signature and the signature value itself. - // In order for Elasticsearch to be able to verify the IdP’s signature, the - // value of the query_string field must be an exact match to the string provided - // by the browser. + // extra parameters named `SigAlg` and `Signature` that contain the algorithm + // used for the signature and the signature value itself. 
+ // In order for Elasticsearch to be able to verify the IdP's signature, the + // value of the `query_string` field must be an exact match to the string + // provided by the browser. // The client application must not attempt to parse or process the string in any // way. QueryString string `json:"query_string"` // Realm The name of the SAML realm in Elasticsearch the configuration. You must - // specify either this parameter or the acs parameter. + // specify either this parameter or the `acs` parameter. Realm *string `json:"realm,omitempty"` } diff --git a/typedapi/security/samlinvalidate/response.go b/typedapi/security/samlinvalidate/response.go index 97e7bad1ce..ab094d9a22 100644 --- a/typedapi/security/samlinvalidate/response.go +++ b/typedapi/security/samlinvalidate/response.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package samlinvalidate // Response holds the response body struct for the package samlinvalidate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/saml_invalidate/Response.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/saml_invalidate/Response.ts#L22-L37 type Response struct { - Invalidated int `json:"invalidated"` - Realm string `json:"realm"` - Redirect string `json:"redirect"` + + // Invalidated The number of tokens that were invalidated as part of this logout. + Invalidated int `json:"invalidated"` + // Realm The realm name of the SAML realm in Elasticsearch that authenticated the + // user. 
+ Realm string `json:"realm"` + // Redirect A SAML logout response as a parameter so that the user can be redirected back + // to the SAML IdP. + Redirect string `json:"redirect"` } // NewResponse returns a Response diff --git a/typedapi/security/samlinvalidate/saml_invalidate.go b/typedapi/security/samlinvalidate/saml_invalidate.go index 85898a7b6f..779e27e742 100644 --- a/typedapi/security/samlinvalidate/saml_invalidate.go +++ b/typedapi/security/samlinvalidate/saml_invalidate.go @@ -16,9 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Submits a SAML LogoutRequest message to Elasticsearch for consumption. +// Invalidate SAML. +// +// Submit a SAML LogoutRequest message to Elasticsearch for consumption. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The logout request comes from the SAML IdP during an IdP initiated Single +// Logout. +// The custom web application can use this API to have Elasticsearch process the +// `LogoutRequest`. +// After successful validation of the request, Elasticsearch invalidates the +// access token and refresh token that corresponds to that specific SAML +// principal and provides a URL that contains a SAML LogoutResponse message. +// Thus the user can be redirected back to their IdP. package samlinvalidate import ( @@ -73,7 +89,23 @@ func NewSamlInvalidateFunc(tp elastictransport.Interface) NewSamlInvalidate { } } -// Submits a SAML LogoutRequest message to Elasticsearch for consumption. +// Invalidate SAML. +// +// Submit a SAML LogoutRequest message to Elasticsearch for consumption. 
+// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The logout request comes from the SAML IdP during an IdP initiated Single +// Logout. +// The custom web application can use this API to have Elasticsearch process the +// `LogoutRequest`. +// After successful validation of the request, Elasticsearch invalidates the +// access token and refresh token that corresponds to that specific SAML +// principal and provides a URL that contains a SAML LogoutResponse message. +// Thus the user can be redirected back to their IdP. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-invalidate.html func New(tp elastictransport.Interface) *SamlInvalidate { @@ -83,8 +115,6 @@ func New(tp elastictransport.Interface) *SamlInvalidate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -338,41 +368,53 @@ func (r *SamlInvalidate) Pretty(pretty bool) *SamlInvalidate { return r } -// Acs The Assertion Consumer Service URL that matches the one of the SAML realm in +// The Assertion Consumer Service URL that matches the one of the SAML realm in // Elasticsearch that should be used. You must specify either this parameter or -// the realm parameter. +// the `realm` parameter. // API name: acs func (r *SamlInvalidate) Acs(acs string) *SamlInvalidate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Acs = &acs return r } -// QueryString The query part of the URL that the user was redirected to by the SAML IdP to +// The query part of the URL that the user was redirected to by the SAML IdP to // initiate the Single Logout. 
-// This query should include a single parameter named SAMLRequest that contains -// a SAML logout request that is deflated and Base64 encoded. +// This query should include a single parameter named `SAMLRequest` that +// contains a SAML logout request that is deflated and Base64 encoded. // If the SAML IdP has signed the logout request, the URL should include two -// extra parameters named SigAlg and Signature that contain the algorithm used -// for the signature and the signature value itself. -// In order for Elasticsearch to be able to verify the IdP’s signature, the -// value of the query_string field must be an exact match to the string provided -// by the browser. +// extra parameters named `SigAlg` and `Signature` that contain the algorithm +// used for the signature and the signature value itself. +// In order for Elasticsearch to be able to verify the IdP's signature, the +// value of the `query_string` field must be an exact match to the string +// provided by the browser. // The client application must not attempt to parse or process the string in any // way. // API name: query_string func (r *SamlInvalidate) QueryString(querystring string) *SamlInvalidate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.QueryString = querystring return r } -// Realm The name of the SAML realm in Elasticsearch the configuration. You must -// specify either this parameter or the acs parameter. +// The name of the SAML realm in Elasticsearch the configuration. You must +// specify either this parameter or the `acs` parameter. 
// API name: realm func (r *SamlInvalidate) Realm(realm string) *SamlInvalidate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Realm = &realm diff --git a/typedapi/security/samllogout/request.go b/typedapi/security/samllogout/request.go index a2ee05a747..13ede40851 100644 --- a/typedapi/security/samllogout/request.go +++ b/typedapi/security/samllogout/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package samllogout @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package samllogout // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/saml_logout/Request.ts#L22-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/saml_logout/Request.ts#L22-L57 type Request struct { // RefreshToken The refresh token that was returned as a response to calling the SAML @@ -38,7 +38,7 @@ type Request struct { // Token The access token that was returned as a response to calling the SAML // authenticate API. // Alternatively, the most recent token that was received after refreshing the - // original one by using a refresh_token. + // original one by using a `refresh_token`. Token string `json:"token"` } diff --git a/typedapi/security/samllogout/response.go b/typedapi/security/samllogout/response.go index 62339ed14d..3ef35a9541 100644 --- a/typedapi/security/samllogout/response.go +++ b/typedapi/security/samllogout/response.go @@ -16,14 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package samllogout // Response holds the response body struct for the package samllogout // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/saml_logout/Response.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/saml_logout/Response.ts#L20-L28 type Response struct { + + // Redirect A URL that contains a SAML logout request as a parameter. + // You can use this URL to be redirected back to the SAML IdP and to initiate + // Single Logout. Redirect string `json:"redirect"` } diff --git a/typedapi/security/samllogout/saml_logout.go b/typedapi/security/samllogout/saml_logout.go index 7aff74a67b..79c9197e2c 100644 --- a/typedapi/security/samllogout/saml_logout.go +++ b/typedapi/security/samllogout/saml_logout.go @@ -16,9 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Logout of SAML. +// // Submits a request to invalidate an access token and refresh token. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// This API invalidates the tokens that were generated for a user by the SAML +// authenticate API. 
+// If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP +// supports this, the Elasticsearch response contains a URL to redirect the user +// to the IdP that contains a SAML logout request (starting an SP-initiated SAML +// Single Logout). package samllogout import ( @@ -73,8 +87,22 @@ func NewSamlLogoutFunc(tp elastictransport.Interface) NewSamlLogout { } } +// Logout of SAML. +// // Submits a request to invalidate an access token and refresh token. // +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// This API invalidates the tokens that were generated for a user by the SAML +// authenticate API. +// If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP +// supports this, the Elasticsearch response contains a URL to redirect the user +// to the IdP that contains a SAML logout request (starting an SP-initiated SAML +// Single Logout). +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-logout.html func New(tp elastictransport.Interface) *SamlLogout { r := &SamlLogout{ @@ -83,8 +111,6 @@ func New(tp elastictransport.Interface) *SamlLogout { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -338,24 +364,32 @@ func (r *SamlLogout) Pretty(pretty bool) *SamlLogout { return r } -// RefreshToken The refresh token that was returned as a response to calling the SAML +// The refresh token that was returned as a response to calling the SAML // authenticate API. // Alternatively, the most recent refresh token that was received after // refreshing the original access token. 
// API name: refresh_token func (r *SamlLogout) RefreshToken(refreshtoken string) *SamlLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RefreshToken = &refreshtoken return r } -// Token The access token that was returned as a response to calling the SAML +// The access token that was returned as a response to calling the SAML // authenticate API. // Alternatively, the most recent token that was received after refreshing the -// original one by using a refresh_token. +// original one by using a `refresh_token`. // API name: token func (r *SamlLogout) Token(token string) *SamlLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Token = token diff --git a/typedapi/security/samlprepareauthentication/request.go b/typedapi/security/samlprepareauthentication/request.go index 19f1064c8d..33f141a77c 100644 --- a/typedapi/security/samlprepareauthentication/request.go +++ b/typedapi/security/samlprepareauthentication/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package samlprepareauthentication @@ -27,20 +27,20 @@ import ( // Request holds the request body struct for the package samlprepareauthentication // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/saml_prepare_authentication/Request.ts#L22-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/saml_prepare_authentication/Request.ts#L22-L67 type Request struct { // Acs The Assertion Consumer Service URL that matches the one of the SAML realms in // Elasticsearch. // The realm is used to generate the authentication request. You must specify - // either this parameter or the realm parameter. + // either this parameter or the `realm` parameter. Acs *string `json:"acs,omitempty"` // Realm The name of the SAML realm in Elasticsearch for which the configuration is // used to generate the authentication request. - // You must specify either this parameter or the acs parameter. + // You must specify either this parameter or the `acs` parameter. Realm *string `json:"realm,omitempty"` // RelayState A string that will be included in the redirect URL that this API returns as - // the RelayState query parameter. + // the `RelayState` query parameter. // If the Authentication Request is signed, this value is used as part of the // signature computation. 
RelayState *string `json:"relay_state,omitempty"` diff --git a/typedapi/security/samlprepareauthentication/response.go b/typedapi/security/samlprepareauthentication/response.go index f395073651..1d69c46788 100644 --- a/typedapi/security/samlprepareauthentication/response.go +++ b/typedapi/security/samlprepareauthentication/response.go @@ -16,16 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package samlprepareauthentication // Response holds the response body struct for the package samlprepareauthentication // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/saml_prepare_authentication/Response.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/saml_prepare_authentication/Response.ts#L22-L37 type Response struct { - Id string `json:"id"` - Realm string `json:"realm"` + + // Id A unique identifier for the SAML Request to be stored by the caller of the + // API. + Id string `json:"id"` + // Realm The name of the Elasticsearch realm that was used to construct the + // authentication request. + Realm string `json:"realm"` + // Redirect The URL to redirect the user to. Redirect string `json:"redirect"` } diff --git a/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go b/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go index eef728a834..5b4095ceb7 100644 --- a/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go +++ b/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go @@ -16,10 +16,31 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a SAML authentication request () as a URL string, based +// Prepare SAML authentication. +// +// Create a SAML authentication request (``) as a URL string based // on the configuration of the respective SAML realm in Elasticsearch. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// This API returns a URL pointing to the SAML Identity Provider. +// You can use the URL to redirect the browser of the user in order to continue +// the authentication process. +// The URL includes a single parameter named `SAMLRequest`, which contains a +// SAML Authentication request that is deflated and Base64 encoded. +// If the configuration dictates that SAML authentication requests should be +// signed, the URL has two extra parameters named `SigAlg` and `Signature`. +// These parameters contain the algorithm used for the signature and the +// signature value itself. +// It also returns a random string that uniquely identifies this SAML +// Authentication request. +// The caller of this API needs to store this identifier as it needs to be used +// in a following step of the authentication process. package samlprepareauthentication import ( @@ -74,9 +95,30 @@ func NewSamlPrepareAuthenticationFunc(tp elastictransport.Interface) NewSamlPrep } } -// Creates a SAML authentication request () as a URL string, based +// Prepare SAML authentication. +// +// Create a SAML authentication request (``) as a URL string based // on the configuration of the respective SAML realm in Elasticsearch. 
// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// This API returns a URL pointing to the SAML Identity Provider. +// You can use the URL to redirect the browser of the user in order to continue +// the authentication process. +// The URL includes a single parameter named `SAMLRequest`, which contains a +// SAML Authentication request that is deflated and Base64 encoded. +// If the configuration dictates that SAML authentication requests should be +// signed, the URL has two extra parameters named `SigAlg` and `Signature`. +// These parameters contain the algorithm used for the signature and the +// signature value itself. +// It also returns a random string that uniquely identifies this SAML +// Authentication request. +// The caller of this API needs to store this identifier as it needs to be used +// in a following step of the authentication process. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-prepare-authentication.html func New(tp elastictransport.Interface) *SamlPrepareAuthentication { r := &SamlPrepareAuthentication{ @@ -85,8 +127,6 @@ func New(tp elastictransport.Interface) *SamlPrepareAuthentication { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -340,35 +380,47 @@ func (r *SamlPrepareAuthentication) Pretty(pretty bool) *SamlPrepareAuthenticati return r } -// Acs The Assertion Consumer Service URL that matches the one of the SAML realms in +// The Assertion Consumer Service URL that matches the one of the SAML realms in // Elasticsearch. // The realm is used to generate the authentication request. You must specify -// either this parameter or the realm parameter. +// either this parameter or the `realm` parameter. 
// API name: acs func (r *SamlPrepareAuthentication) Acs(acs string) *SamlPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Acs = &acs return r } -// Realm The name of the SAML realm in Elasticsearch for which the configuration is +// The name of the SAML realm in Elasticsearch for which the configuration is // used to generate the authentication request. -// You must specify either this parameter or the acs parameter. +// You must specify either this parameter or the `acs` parameter. // API name: realm func (r *SamlPrepareAuthentication) Realm(realm string) *SamlPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Realm = &realm return r } -// RelayState A string that will be included in the redirect URL that this API returns as -// the RelayState query parameter. +// A string that will be included in the redirect URL that this API returns as +// the `RelayState` query parameter. // If the Authentication Request is signed, this value is used as part of the // signature computation. // API name: relay_state func (r *SamlPrepareAuthentication) RelayState(relaystate string) *SamlPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RelayState = &relaystate diff --git a/typedapi/security/samlserviceprovidermetadata/response.go b/typedapi/security/samlserviceprovidermetadata/response.go index 0600aedb10..c9b73c80a9 100644 --- a/typedapi/security/samlserviceprovidermetadata/response.go +++ b/typedapi/security/samlserviceprovidermetadata/response.go @@ -16,14 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package samlserviceprovidermetadata // Response holds the response body struct for the package samlserviceprovidermetadata // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/saml_service_provider_metadata/Response.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/saml_service_provider_metadata/Response.ts#L20-L27 type Response struct { + + // Metadata An XML string that contains a SAML Service Provider's metadata for the realm. Metadata string `json:"metadata"` } diff --git a/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go b/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go index d8dc733dd5..44ac39922e 100644 --- a/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go +++ b/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go @@ -16,9 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Create SAML service provider metadata. +// // Generate SAML metadata for a SAML 2.0 Service Provider. +// +// The SAML 2.0 specification provides a mechanism for Service Providers to +// describe their capabilities and configuration using a metadata file. +// This API generates Service Provider metadata based on the configuration of a +// SAML realm in Elasticsearch. 
package samlserviceprovidermetadata import ( @@ -76,8 +83,15 @@ func NewSamlServiceProviderMetadataFunc(tp elastictransport.Interface) NewSamlSe } } +// Create SAML service provider metadata. +// // Generate SAML metadata for a SAML 2.0 Service Provider. // +// The SAML 2.0 specification provides a mechanism for Service Providers to +// describe their capabilities and configuration using a metadata file. +// This API generates Service Provider metadata based on the configuration of a +// SAML realm in Elasticsearch. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-sp-metadata.html func New(tp elastictransport.Interface) *SamlServiceProviderMetadata { r := &SamlServiceProviderMetadata{ diff --git a/typedapi/security/suggestuserprofiles/request.go b/typedapi/security/suggestuserprofiles/request.go index bddf83af98..974f3fae62 100644 --- a/typedapi/security/suggestuserprofiles/request.go +++ b/typedapi/security/suggestuserprofiles/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package suggestuserprofiles @@ -33,23 +33,27 @@ import ( // Request holds the request body struct for the package suggestuserprofiles // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/suggest_user_profiles/Request.ts#L24-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/suggest_user_profiles/Request.ts#L24-L81 type Request struct { - // Data List of filters for the `data` field of the profile document. - // To return all content use `data=*`. 
To return a subset of content - // use `data=` to retrieve content nested under the specified ``. - // By default returns no `data` content. + // Data A comma-separated list of filters for the `data` field of the profile + // document. + // To return all content use `data=*`. + // To return a subset of content, use `data=` to retrieve content nested + // under the specified ``. + // By default, the API returns no `data` content. + // It is an error to specify `data` as both the query parameter and the request + // body field. Data []string `json:"data,omitempty"` // Hint Extra search criteria to improve relevance of the suggestion result. // Profiles matching the spcified hint are ranked higher in the response. - // Profiles not matching the hint don't exclude the profile from the response - // as long as the profile matches the `name` field query. + // Profiles not matching the hint aren't excluded from the response as long as + // the profile matches the `name` field query. Hint *types.Hint `json:"hint,omitempty"` - // Name Query string used to match name-related fields in user profile documents. + // Name A query string used to match name-related fields in user profile documents. // Name-related fields are the user's `username`, `full_name`, and `email`. Name *string `json:"name,omitempty"` - // Size Number of profiles to return. + // Size The number of profiles to return. Size *int64 `json:"size,omitempty"` } diff --git a/typedapi/security/suggestuserprofiles/response.go b/typedapi/security/suggestuserprofiles/response.go index e16a377acc..79508d5b63 100644 --- a/typedapi/security/suggestuserprofiles/response.go +++ b/typedapi/security/suggestuserprofiles/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package suggestuserprofiles @@ -26,11 +26,16 @@ import ( // Response holds the response body struct for the package suggestuserprofiles // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/suggest_user_profiles/Response.ts#L29-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/suggest_user_profiles/Response.ts#L29-L44 type Response struct { - Profiles []types.UserProfile `json:"profiles"` - Took int64 `json:"took"` - Total types.TotalUserProfiles `json:"total"` + + // Profiles A list of profile documents, ordered by relevance, that match the search + // criteria. + Profiles []types.UserProfile `json:"profiles"` + // Took The number of milliseconds it took Elasticsearch to run the request. + Took int64 `json:"took"` + // Total Metadata about the number of matching profiles. + Total types.TotalUserProfiles `json:"total"` } // NewResponse returns a Response diff --git a/typedapi/security/suggestuserprofiles/suggest_user_profiles.go b/typedapi/security/suggestuserprofiles/suggest_user_profiles.go index ad2c4ed1d1..4be87744ba 100644 --- a/typedapi/security/suggestuserprofiles/suggest_user_profiles.go +++ b/typedapi/security/suggestuserprofiles/suggest_user_profiles.go @@ -16,9 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Suggest a user profile. +// // Get suggestions for user profiles that match specified search criteria. 
+// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. package suggestuserprofiles import ( @@ -73,8 +81,16 @@ func NewSuggestUserProfilesFunc(tp elastictransport.Interface) NewSuggestUserPro } } +// Suggest a user profile. +// // Get suggestions for user profiles that match specified search criteria. // +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-suggest-user-profile.html func New(tp elastictransport.Interface) *SuggestUserProfiles { r := &SuggestUserProfiles{ @@ -83,8 +99,6 @@ func New(tp elastictransport.Interface) *SuggestUserProfiles { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -338,42 +352,63 @@ func (r *SuggestUserProfiles) Pretty(pretty bool) *SuggestUserProfiles { return r } -// Data List of filters for the `data` field of the profile document. -// To return all content use `data=*`. To return a subset of content -// use `data=` to retrieve content nested under the specified ``. -// By default returns no `data` content. +// A comma-separated list of filters for the `data` field of the profile +// document. +// To return all content use `data=*`. +// To return a subset of content, use `data=` to retrieve content nested +// under the specified ``. 
+// By default, the API returns no `data` content. +// It is an error to specify `data` as both the query parameter and the request +// body field. // API name: data func (r *SuggestUserProfiles) Data(data ...string) *SuggestUserProfiles { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Data = make([]string, len(data)) r.req.Data = data return r } -// Hint Extra search criteria to improve relevance of the suggestion result. +// Extra search criteria to improve relevance of the suggestion result. // Profiles matching the spcified hint are ranked higher in the response. -// Profiles not matching the hint don't exclude the profile from the response -// as long as the profile matches the `name` field query. +// Profiles not matching the hint aren't excluded from the response as long as +// the profile matches the `name` field query. // API name: hint -func (r *SuggestUserProfiles) Hint(hint *types.Hint) *SuggestUserProfiles { +func (r *SuggestUserProfiles) Hint(hint types.HintVariant) *SuggestUserProfiles { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Hint = hint + r.req.Hint = hint.HintCaster() return r } -// Name Query string used to match name-related fields in user profile documents. +// A query string used to match name-related fields in user profile documents. // Name-related fields are the user's `username`, `full_name`, and `email`. // API name: name func (r *SuggestUserProfiles) Name(name string) *SuggestUserProfiles { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Name = &name return r } -// Size Number of profiles to return. +// The number of profiles to return. 
// API name: size func (r *SuggestUserProfiles) Size(size int64) *SuggestUserProfiles { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Size = &size diff --git a/typedapi/security/updateapikey/request.go b/typedapi/security/updateapikey/request.go index cf08a5e048..3955d401e9 100644 --- a/typedapi/security/updateapikey/request.go +++ b/typedapi/security/updateapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateapikey @@ -32,23 +32,32 @@ import ( // Request holds the request body struct for the package updateapikey // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/update_api_key/Request.ts#L26-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/update_api_key/Request.ts#L26-L91 type Request struct { - // Expiration Expiration time for the API key. + // Expiration The expiration time for the API key. + // By default, API keys never expire. + // This property can be omitted to leave the expiration unchanged. Expiration types.Duration `json:"expiration,omitempty"` - // Metadata Arbitrary metadata that you want to associate with the API key. It supports - // nested data structure. Within the metadata object, keys beginning with _ are - // reserved for system usage. + // Metadata Arbitrary metadata that you want to associate with the API key. + // It supports a nested data structure. + // Within the metadata object, keys beginning with `_` are reserved for system + // usage. 
+ // When specified, this value fully replaces the metadata previously associated + // with the API key. Metadata types.Metadata `json:"metadata,omitempty"` - // RoleDescriptors An array of role descriptors for this API key. This parameter is optional. - // When it is not specified or is an empty array, then the API key will have a - // point in time snapshot of permissions of the authenticated user. If you - // supply role descriptors then the resultant permissions would be an - // intersection of API keys permissions and authenticated user’s permissions - // thereby limiting the access scope for API keys. The structure of role - // descriptor is the same as the request for create role API. For more details, - // see create or update roles API. + // RoleDescriptors The role descriptors to assign to this API key. + // The API key's effective permissions are an intersection of its assigned + // privileges and the point in time snapshot of permissions of the owner user. + // You can assign new privileges by specifying them in this parameter. + // To remove assigned privileges, you can supply an empty `role_descriptors` + // parameter, that is to say, an empty object `{}`. + // If an API key has no assigned privileges, it inherits the owner user's full + // permissions. + // The snapshot of the owner's permissions is always updated, whether you supply + // the `role_descriptors` parameter or not. + // The structure of a role descriptor is the same as the request for the create + // API keys API. RoleDescriptors map[string]types.RoleDescriptor `json:"role_descriptors,omitempty"` } diff --git a/typedapi/security/updateapikey/response.go b/typedapi/security/updateapikey/response.go index f9354ee39c..9484c49b42 100644 --- a/typedapi/security/updateapikey/response.go +++ b/typedapi/security/updateapikey/response.go @@ -16,17 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateapikey // Response holds the response body struct for the package updateapikey // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/update_api_key/Response.ts#L20-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/update_api_key/Response.ts#L20-L28 type Response struct { // Updated If `true`, the API key was updated. - // If `false`, the API key didn’t change because no change was detected. + // If `false`, the API key didn't change because no change was detected. Updated bool `json:"updated"` } diff --git a/typedapi/security/updateapikey/update_api_key.go b/typedapi/security/updateapikey/update_api_key.go index 74a15a2b48..4619968676 100644 --- a/typedapi/security/updateapikey/update_api_key.go +++ b/typedapi/security/updateapikey/update_api_key.go @@ -16,33 +16,41 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Update an API key. -// Updates attributes of an existing API key. +// +// Update attributes of an existing API key. +// This API supports updates to an API key's access scope, expiration, and +// metadata. +// +// To use this API, you must have at least the `manage_own_api_key` cluster +// privilege. // Users can only update API keys that they created or that were granted to // them. 
-// Use this API to update API keys created by the create API Key or grant API +// To update another user’s API key, use the `run_as` feature to submit a +// request on behalf of another user. +// +// IMPORTANT: It's not possible to use an API key as the authentication +// credential for this API. The owner user’s credentials are required. +// +// Use this API to update API keys created by the create API key or grant API // Key APIs. -// If you need to apply the same update to many API keys, you can use bulk -// update API Keys to reduce overhead. -// It’s not possible to update expired API keys, or API keys that have been -// invalidated by invalidate API Key. -// This API supports updates to an API key’s access scope and metadata. +// If you need to apply the same update to many API keys, you can use the bulk +// update API keys API to reduce overhead. +// It's not possible to update expired API keys or API keys that have been +// invalidated by the invalidate API key API. +// // The access scope of an API key is derived from the `role_descriptors` you -// specify in the request, and a snapshot of the owner user’s permissions at the +// specify in the request and a snapshot of the owner user's permissions at the // time of the request. -// The snapshot of the owner’s permissions is updated automatically on every +// The snapshot of the owner's permissions is updated automatically on every // call. -// If you don’t specify `role_descriptors` in the request, a call to this API -// might still change the API key’s access scope. -// This change can occur if the owner user’s permissions have changed since the +// +// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to +// this API might still change the API key's access scope. +// This change can occur if the owner user's permissions have changed since the // API key was created or last modified. 
-// To update another user’s API key, use the `run_as` feature to submit a -// request on behalf of another user. -// IMPORTANT: It’s not possible to use an API key as the authentication -// credential for this API. -// To update an API key, the owner user’s credentials are required. package updateapikey import ( @@ -106,30 +114,38 @@ func NewUpdateApiKeyFunc(tp elastictransport.Interface) NewUpdateApiKey { } // Update an API key. -// Updates attributes of an existing API key. +// +// Update attributes of an existing API key. +// This API supports updates to an API key's access scope, expiration, and +// metadata. +// +// To use this API, you must have at least the `manage_own_api_key` cluster +// privilege. // Users can only update API keys that they created or that were granted to // them. -// Use this API to update API keys created by the create API Key or grant API +// To update another user’s API key, use the `run_as` feature to submit a +// request on behalf of another user. +// +// IMPORTANT: It's not possible to use an API key as the authentication +// credential for this API. The owner user’s credentials are required. +// +// Use this API to update API keys created by the create API key or grant API // Key APIs. -// If you need to apply the same update to many API keys, you can use bulk -// update API Keys to reduce overhead. -// It’s not possible to update expired API keys, or API keys that have been -// invalidated by invalidate API Key. -// This API supports updates to an API key’s access scope and metadata. +// If you need to apply the same update to many API keys, you can use the bulk +// update API keys API to reduce overhead. +// It's not possible to update expired API keys or API keys that have been +// invalidated by the invalidate API key API. 
+// // The access scope of an API key is derived from the `role_descriptors` you -// specify in the request, and a snapshot of the owner user’s permissions at the +// specify in the request and a snapshot of the owner user's permissions at the // time of the request. -// The snapshot of the owner’s permissions is updated automatically on every +// The snapshot of the owner's permissions is updated automatically on every // call. -// If you don’t specify `role_descriptors` in the request, a call to this API -// might still change the API key’s access scope. -// This change can occur if the owner user’s permissions have changed since the +// +// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to +// this API might still change the API key's access scope. +// This change can occur if the owner user's permissions have changed since the // API key was created or last modified. -// To update another user’s API key, use the `run_as` feature to submit a -// request on behalf of another user. -// IMPORTANT: It’s not possible to use an API key as the authentication -// credential for this API. -// To update an API key, the owner user’s credentials are required. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-api-key.html func New(tp elastictransport.Interface) *UpdateApiKey { @@ -139,8 +155,6 @@ func New(tp elastictransport.Interface) *UpdateApiKey { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -407,36 +421,76 @@ func (r *UpdateApiKey) Pretty(pretty bool) *UpdateApiKey { return r } -// Expiration Expiration time for the API key. +// The expiration time for the API key. +// By default, API keys never expire. +// This property can be omitted to leave the expiration unchanged. 
// API name: expiration -func (r *UpdateApiKey) Expiration(duration types.Duration) *UpdateApiKey { - r.req.Expiration = duration +func (r *UpdateApiKey) Expiration(duration types.DurationVariant) *UpdateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() return r } -// Metadata Arbitrary metadata that you want to associate with the API key. It supports -// nested data structure. Within the metadata object, keys beginning with _ are -// reserved for system usage. +// Arbitrary metadata that you want to associate with the API key. +// It supports a nested data structure. +// Within the metadata object, keys beginning with `_` are reserved for system +// usage. +// When specified, this value fully replaces the metadata previously associated +// with the API key. // API name: metadata -func (r *UpdateApiKey) Metadata(metadata types.Metadata) *UpdateApiKey { - r.req.Metadata = metadata +func (r *UpdateApiKey) Metadata(metadata types.MetadataVariant) *UpdateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } -// RoleDescriptors An array of role descriptors for this API key. This parameter is optional. -// When it is not specified or is an empty array, then the API key will have a -// point in time snapshot of permissions of the authenticated user. If you -// supply role descriptors then the resultant permissions would be an -// intersection of API keys permissions and authenticated user’s permissions -// thereby limiting the access scope for API keys. The structure of role -// descriptor is the same as the request for create role API. For more details, -// see create or update roles API. +// The role descriptors to assign to this API key. 
+// The API key's effective permissions are an intersection of its assigned +// privileges and the point in time snapshot of permissions of the owner user. +// You can assign new privileges by specifying them in this parameter. +// To remove assigned privileges, you can supply an empty `role_descriptors` +// parameter, that is to say, an empty object `{}`. +// If an API key has no assigned privileges, it inherits the owner user's full +// permissions. +// The snapshot of the owner's permissions is always updated, whether you supply +// the `role_descriptors` parameter or not. +// The structure of a role descriptor is the same as the request for the create +// API keys API. // API name: role_descriptors func (r *UpdateApiKey) RoleDescriptors(roledescriptors map[string]types.RoleDescriptor) *UpdateApiKey { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RoleDescriptors = roledescriptors + return r +} + +func (r *UpdateApiKey) AddRoleDescriptor(key string, value types.RoleDescriptorVariant) *UpdateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.RoleDescriptor + if r.req.RoleDescriptors == nil { + r.req.RoleDescriptors = make(map[string]types.RoleDescriptor) + } else { + tmp = r.req.RoleDescriptors + } + + tmp[key] = *value.RoleDescriptorCaster() + r.req.RoleDescriptors = tmp return r } diff --git a/typedapi/security/updatecrossclusterapikey/request.go b/typedapi/security/updatecrossclusterapikey/request.go new file mode 100644 index 0000000000..0682ab4133 --- /dev/null +++ b/typedapi/security/updatecrossclusterapikey/request.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package updatecrossclusterapikey + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package updatecrossclusterapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/update_cross_cluster_api_key/UpdateCrossClusterApiKeyRequest.ts#L25-L83 +type Request struct { + + // Access The access to be granted to this API key. + // The access is composed of permissions for cross cluster search and cross + // cluster replication. + // At least one of them must be specified. + // When specified, the new access assignment fully replaces the previously + // assigned access. + Access types.Access `json:"access"` + // Expiration The expiration time for the API key. + // By default, API keys never expire. This property can be omitted to leave the + // value unchanged. + Expiration types.Duration `json:"expiration,omitempty"` + // Metadata Arbitrary metadata that you want to associate with the API key. + // It supports nested data structure. 
+ // Within the metadata object, keys beginning with `_` are reserved for system + // usage. + // When specified, this information fully replaces metadata previously + // associated with the API key. + Metadata types.Metadata `json:"metadata,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatecrossclusterapikey request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "access": + if err := dec.Decode(&s.Access); err != nil { + return fmt.Errorf("%s | %w", "Access", err) + } + + case "expiration": + if err := dec.Decode(&s.Expiration); err != nil { + return fmt.Errorf("%s | %w", "Expiration", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + } + } + return nil +} diff --git a/typedapi/security/updatecrossclusterapikey/response.go b/typedapi/security/updatecrossclusterapikey/response.go new file mode 100644 index 0000000000..0328ded8d1 --- /dev/null +++ b/typedapi/security/updatecrossclusterapikey/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package updatecrossclusterapikey + +// Response holds the response body struct for the package updatecrossclusterapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/update_cross_cluster_api_key/UpdateCrossClusterApiKeyResponse.ts#L20-L28 +type Response struct { + + // Updated If `true`, the API key was updated. + // If `false`, the API key didn’t change because no change was detected. + Updated bool `json:"updated"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/updatecrossclusterapikey/update_cross_cluster_api_key.go b/typedapi/security/updatecrossclusterapikey/update_cross_cluster_api_key.go new file mode 100644 index 0000000000..bd991cb8ff --- /dev/null +++ b/typedapi/security/updatecrossclusterapikey/update_cross_cluster_api_key.go @@ -0,0 +1,459 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Update a cross-cluster API key. +// +// Update the attributes of an existing cross-cluster API key, which is used for +// API key based remote cluster access. +// +// To use this API, you must have at least the `manage_security` cluster +// privilege. +// Users can only update API keys that they created. +// To update another user's API key, use the `run_as` feature to submit a +// request on behalf of another user. +// +// IMPORTANT: It's not possible to use an API key as the authentication +// credential for this API. +// To update an API key, the owner user's credentials are required. +// +// It's not possible to update expired API keys, or API keys that have been +// invalidated by the invalidate API key API. +// +// This API supports updates to an API key's access scope, metadata, and +// expiration. +// The owner user's information, such as the `username` and `realm`, is also +// updated automatically on every call. +// +// NOTE: This API cannot update REST API keys, which should be updated by either +// the update API key or bulk update API keys API. 
+package updatecrossclusterapikey + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateCrossClusterApiKey struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateCrossClusterApiKey type alias for index. +type NewUpdateCrossClusterApiKey func(id string) *UpdateCrossClusterApiKey + +// NewUpdateCrossClusterApiKeyFunc returns a new instance of UpdateCrossClusterApiKey with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateCrossClusterApiKeyFunc(tp elastictransport.Interface) NewUpdateCrossClusterApiKey { + return func(id string) *UpdateCrossClusterApiKey { + n := New(tp) + + n._id(id) + + return n + } +} + +// Update a cross-cluster API key. +// +// Update the attributes of an existing cross-cluster API key, which is used for +// API key based remote cluster access. +// +// To use this API, you must have at least the `manage_security` cluster +// privilege. +// Users can only update API keys that they created. +// To update another user's API key, use the `run_as` feature to submit a +// request on behalf of another user. +// +// IMPORTANT: It's not possible to use an API key as the authentication +// credential for this API. +// To update an API key, the owner user's credentials are required. 
+// +// It's not possible to update expired API keys, or API keys that have been +// invalidated by the invalidate API key API. +// +// This API supports updates to an API key's access scope, metadata, and +// expiration. +// The owner user's information, such as the `username` and `realm`, is also +// updated automatically on every call. +// +// NOTE: This API cannot update REST API keys, which should be updated by either +// the update API key or bulk update API keys API. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-cross-cluster-api-key.html +func New(tp elastictransport.Interface) *UpdateCrossClusterApiKey { + r := &UpdateCrossClusterApiKey{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateCrossClusterApiKey) Raw(raw io.Reader) *UpdateCrossClusterApiKey { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateCrossClusterApiKey) Request(req *Request) *UpdateCrossClusterApiKey { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *UpdateCrossClusterApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateCrossClusterApiKey: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("cross_cluster") + path.WriteString("/") + path.WriteString("api_key") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateCrossClusterApiKey) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.update_cross_cluster_api_key") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.update_cross_cluster_api_key") + if reader := instrument.RecordRequestBody(ctx, "security.update_cross_cluster_api_key", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.update_cross_cluster_api_key") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateCrossClusterApiKey query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatecrossclusterapikey.Response +func (r UpdateCrossClusterApiKey) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.update_cross_cluster_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateCrossClusterApiKey headers map. +func (r *UpdateCrossClusterApiKey) Header(key, value string) *UpdateCrossClusterApiKey { + r.headers.Set(key, value) + + return r +} + +// Id The ID of the cross-cluster API key to update. +// API Name: id +func (r *UpdateCrossClusterApiKey) _id(id string) *UpdateCrossClusterApiKey { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateCrossClusterApiKey) ErrorTrace(errortrace bool) *UpdateCrossClusterApiKey { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *UpdateCrossClusterApiKey) FilterPath(filterpaths ...string) *UpdateCrossClusterApiKey { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateCrossClusterApiKey) Human(human bool) *UpdateCrossClusterApiKey { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateCrossClusterApiKey) Pretty(pretty bool) *UpdateCrossClusterApiKey { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The access to be granted to this API key. +// The access is composed of permissions for cross cluster search and cross +// cluster replication. +// At least one of them must be specified. +// When specified, the new access assignment fully replaces the previously +// assigned access. +// API name: access +func (r *UpdateCrossClusterApiKey) Access(access types.AccessVariant) *UpdateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Access = *access.AccessCaster() + + return r +} + +// The expiration time for the API key. +// By default, API keys never expire. This property can be omitted to leave the +// value unchanged. 
+// API name: expiration +func (r *UpdateCrossClusterApiKey) Expiration(duration types.DurationVariant) *UpdateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() + + return r +} + +// Arbitrary metadata that you want to associate with the API key. +// It supports nested data structure. +// Within the metadata object, keys beginning with `_` are reserved for system +// usage. +// When specified, this information fully replaces metadata previously +// associated with the API key. +// API name: metadata +func (r *UpdateCrossClusterApiKey) Metadata(metadata types.MetadataVariant) *UpdateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} diff --git a/typedapi/security/updatesettings/request.go b/typedapi/security/updatesettings/request.go new file mode 100644 index 0000000000..53318973e0 --- /dev/null +++ b/typedapi/security/updatesettings/request.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package updatesettings + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package updatesettings +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/update_settings/SecurityUpdateSettingsRequest.ts#L24-L71 +type Request struct { + + // Security Settings for the index used for most security configuration, including native + // realm users and roles configured with the API. + Security *types.SecuritySettings `json:"security,omitempty"` + // SecurityProfile Settings for the index used to store profile information. + SecurityProfile *types.SecuritySettings `json:"security-profile,omitempty"` + // SecurityTokens Settings for the index used to store tokens. + SecurityTokens *types.SecuritySettings `json:"security-tokens,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatesettings request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/security/updatesettings/response.go b/typedapi/security/updatesettings/response.go new file mode 100644 index 0000000000..3f3151c1bd --- /dev/null +++ b/typedapi/security/updatesettings/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package updatesettings + +// Response holds the response body struct for the package updatesettings +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/update_settings/SecurityUpdateSettingsResponse.ts#L20-L24 +type Response struct { + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/security/updatesettings/update_settings.go b/typedapi/security/updatesettings/update_settings.go index bae3f53a10..4863e83c87 100644 --- a/typedapi/security/updatesettings/update_settings.go +++ b/typedapi/security/updatesettings/update_settings.go @@ -16,21 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Update settings for the security system index +// Update security index settings. 
+// +// Update the user-configurable settings for the security internal index +// (`.security` and associated indices). Only a subset of settings are allowed +// to be modified. This includes `index.auto_expand_replicas` and +// `index.number_of_replicas`. +// +// NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will +// be ignored during updates. +// +// If a specific index is not in use on the system and settings are provided for +// it, the request will be rejected. +// This API does not yet support configuring the settings for indices before +// they are in use. package updatesettings import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -45,6 +62,10 @@ type UpdateSettings struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -65,7 +86,20 @@ func NewUpdateSettingsFunc(tp elastictransport.Interface) NewUpdateSettings { } } -// Update settings for the security system index +// Update security index settings. +// +// Update the user-configurable settings for the security internal index +// (`.security` and associated indices). Only a subset of settings are allowed +// to be modified. This includes `index.auto_expand_replicas` and +// `index.number_of_replicas`. +// +// NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will +// be ignored during updates. +// +// If a specific index is not in use on the system and settings are provided for +// it, the request will be rejected. +// This API does not yet support configuring the settings for indices before +// they are in use. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-settings.html func New(tp elastictransport.Interface) *UpdateSettings { @@ -73,6 +107,8 @@ func New(tp elastictransport.Interface) *UpdateSettings { transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -84,6 +120,21 @@ func New(tp elastictransport.Interface) *UpdateSettings { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateSettings) Raw(raw io.Reader) *UpdateSettings { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateSettings) Request(req *Request) *UpdateSettings { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. func (r *UpdateSettings) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -93,6 +144,31 @@ func (r *UpdateSettings) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateSettings: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -180,13 +256,7 @@ func (r UpdateSettings) Perform(providedCtx context.Context) (*http.Response, er } // Do runs the request through the transport, handle the response and returns a updatesettings.Response -func (r UpdateSettings) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve 
the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. -func (r UpdateSettings) IsSuccess(providedCtx context.Context) (bool, error) { +func (r UpdateSettings) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -197,30 +267,46 @@ func (r UpdateSettings) IsSuccess(providedCtx context.Context) (bool, error) { ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the UpdateSettings query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err } - return false, nil + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the UpdateSettings headers map. 
@@ -229,3 +315,107 @@ func (r *UpdateSettings) Header(key, value string) *UpdateSettings { return r } + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *UpdateSettings) MasterTimeout(duration string) *UpdateSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *UpdateSettings) Timeout(duration string) *UpdateSettings { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateSettings) ErrorTrace(errortrace bool) *UpdateSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateSettings) FilterPath(filterpaths ...string) *UpdateSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateSettings) Human(human bool) *UpdateSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". 
Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateSettings) Pretty(pretty bool) *UpdateSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Settings for the index used for most security configuration, including native +// realm users and roles configured with the API. +// API name: security +func (r *UpdateSettings) Security(security types.SecuritySettingsVariant) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Security = security.SecuritySettingsCaster() + + return r +} + +// Settings for the index used to store profile information. +// API name: security-profile +func (r *UpdateSettings) SecurityProfile(securityprofile types.SecuritySettingsVariant) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SecurityProfile = securityprofile.SecuritySettingsCaster() + + return r +} + +// Settings for the index used to store tokens. +// API name: security-tokens +func (r *UpdateSettings) SecurityTokens(securitytokens types.SecuritySettingsVariant) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SecurityTokens = securitytokens.SecuritySettingsCaster() + + return r +} diff --git a/typedapi/security/updateuserprofiledata/request.go b/typedapi/security/updateuserprofiledata/request.go index 3e01a4b0a0..91c43fb0ac 100644 --- a/typedapi/security/updateuserprofiledata/request.go +++ b/typedapi/security/updateuserprofiledata/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateuserprofiledata @@ -27,14 +27,20 @@ import ( // Request holds the request body struct for the package updateuserprofiledata // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/update_user_profile_data/Request.ts#L27-L70 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/update_user_profile_data/Request.ts#L27-L98 type Request struct { // Data Non-searchable data that you want to associate with the user profile. // This field supports a nested data structure. + // Within the `data` object, top-level keys cannot begin with an underscore + // (`_`) or contain a period (`.`). + // The data object is not searchable, but can be retrieved with the get user + // profile API. Data map[string]json.RawMessage `json:"data,omitempty"` - // Labels Searchable data that you want to associate with the user profile. This - // field supports a nested data structure. + // Labels Searchable data that you want to associate with the user profile. + // This field supports a nested data structure. + // Within the labels object, top-level keys cannot begin with an underscore + // (`_`) or contain a period (`.`). Labels map[string]json.RawMessage `json:"labels,omitempty"` } diff --git a/typedapi/security/updateuserprofiledata/response.go b/typedapi/security/updateuserprofiledata/response.go index 1b356772cb..460f6de8ea 100644 --- a/typedapi/security/updateuserprofiledata/response.go +++ b/typedapi/security/updateuserprofiledata/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updateuserprofiledata // Response holds the response body struct for the package updateuserprofiledata // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/update_user_profile_data/Response.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/update_user_profile_data/Response.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/security/updateuserprofiledata/update_user_profile_data.go b/typedapi/security/updateuserprofiledata/update_user_profile_data.go index 91377a9fed..3e30ec56c1 100644 --- a/typedapi/security/updateuserprofiledata/update_user_profile_data.go +++ b/typedapi/security/updateuserprofiledata/update_user_profile_data.go @@ -16,10 +16,33 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Updates specific data for the user profile that's associated with the -// specified unique ID. +// Update user profile data. +// +// Update specific data for the user profile that is associated with a unique +// ID. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. 
+// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_user_profile` cluster privilege. +// * The `update_profile_data` global privilege for the namespaces that are +// referenced in the request. +// +// This API updates the `labels` and `data` fields of an existing user profile +// document with JSON objects. +// New keys and their values are added to the profile document and conflicting +// keys are replaced by data that's included in the request. +// +// For both labels and data, content is namespaced by the top-level fields. +// The `update_profile_data` global privilege grants privileges for updating +// only the allowed namespaces. package updateuserprofiledata import ( @@ -83,8 +106,31 @@ func NewUpdateUserProfileDataFunc(tp elastictransport.Interface) NewUpdateUserPr } } -// Updates specific data for the user profile that's associated with the -// specified unique ID. +// Update user profile data. +// +// Update specific data for the user profile that is associated with a unique +// ID. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_user_profile` cluster privilege. +// * The `update_profile_data` global privilege for the namespaces that are +// referenced in the request. +// +// This API updates the `labels` and `data` fields of an existing user profile +// document with JSON objects. +// New keys and their values are added to the profile document and conflicting +// keys are replaced by data that's included in the request. 
+// +// For both labels and data, content is namespaced by the top-level fields. +// The `update_profile_data` global privilege grants privileges for updating +// only the allowed namespaces. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-user-profile-data.html func New(tp elastictransport.Interface) *UpdateUserProfileData { @@ -94,8 +140,6 @@ func New(tp elastictransport.Interface) *UpdateUserProfileData { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -337,9 +381,10 @@ func (r *UpdateUserProfileData) IfPrimaryTerm(ifprimaryterm string) *UpdateUserP } // Refresh If 'true', Elasticsearch refreshes the affected shards to make this operation -// visible to search, if 'wait_for' then wait for a refresh to make this -// operation -// visible to search, if 'false' do nothing with refreshes. +// visible to search. +// If 'wait_for', it waits for a refresh to make this operation visible to +// search. +// If 'false', nothing is done with refreshes. // API name: refresh func (r *UpdateUserProfileData) Refresh(refresh refresh.Refresh) *UpdateUserProfileData { r.values.Set("refresh", refresh.String()) @@ -391,22 +436,70 @@ func (r *UpdateUserProfileData) Pretty(pretty bool) *UpdateUserProfileData { return r } -// Data Non-searchable data that you want to associate with the user profile. +// Non-searchable data that you want to associate with the user profile. // This field supports a nested data structure. +// Within the `data` object, top-level keys cannot begin with an underscore +// (`_`) or contain a period (`.`). +// The data object is not searchable, but can be retrieved with the get user +// profile API. 
// API name: data func (r *UpdateUserProfileData) Data(data map[string]json.RawMessage) *UpdateUserProfileData { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Data = data + return r +} +func (r *UpdateUserProfileData) AddDatum(key string, value json.RawMessage) *UpdateUserProfileData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Data == nil { + r.req.Data = make(map[string]json.RawMessage) + } else { + tmp = r.req.Data + } + + tmp[key] = value + + r.req.Data = tmp return r } -// Labels Searchable data that you want to associate with the user profile. This -// field supports a nested data structure. +// Searchable data that you want to associate with the user profile. +// This field supports a nested data structure. +// Within the labels object, top-level keys cannot begin with an underscore +// (`_`) or contain a period (`.`). // API name: labels func (r *UpdateUserProfileData) Labels(labels map[string]json.RawMessage) *UpdateUserProfileData { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Labels = labels + return r +} + +func (r *UpdateUserProfileData) AddLabel(key string, value json.RawMessage) *UpdateUserProfileData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Labels == nil { + r.req.Labels = make(map[string]json.RawMessage) + } else { + tmp = r.req.Labels + } + + tmp[key] = value + r.req.Labels = tmp return r } diff --git a/typedapi/shutdown/deletenode/delete_node.go b/typedapi/shutdown/deletenode/delete_node.go index 721154c3ec..0cea5185fb 100644 --- a/typedapi/shutdown/deletenode/delete_node.go +++ b/typedapi/shutdown/deletenode/delete_node.go @@ -16,10 +16,20 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Removes a node from the shutdown list. Designed for indirect use by ECE/ESS -// and ECK. Direct use is not supported. +// Cancel node shutdown preparations. +// Remove a node from the shutdown list so it can resume normal operations. +// You must explicitly clear the shutdown request when a node rejoins the +// cluster or when a node has permanently left the cluster. +// Shutdown requests are never removed automatically by Elasticsearch. +// +// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic +// Cloud Enterprise, and Elastic Cloud on Kubernetes. +// Direct use is not supported. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. package deletenode import ( @@ -78,10 +88,20 @@ func NewDeleteNodeFunc(tp elastictransport.Interface) NewDeleteNode { } } -// Removes a node from the shutdown list. Designed for indirect use by ECE/ESS -// and ECK. Direct use is not supported. +// Cancel node shutdown preparations. +// Remove a node from the shutdown list so it can resume normal operations. +// You must explicitly clear the shutdown request when a node rejoins the +// cluster or when a node has permanently left the cluster. +// Shutdown requests are never removed automatically by Elasticsearch. +// +// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic +// Cloud Enterprise, and Elastic Cloud on Kubernetes. +// Direct use is not supported. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current +// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-shutdown.html func New(tp elastictransport.Interface) *DeleteNode { r := &DeleteNode{ transport: tp, diff --git a/typedapi/shutdown/deletenode/response.go b/typedapi/shutdown/deletenode/response.go index 706508f653..dc2938eb5e 100644 --- a/typedapi/shutdown/deletenode/response.go +++ b/typedapi/shutdown/deletenode/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletenode // Response holds the response body struct for the package deletenode // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/shutdown/delete_node/ShutdownDeleteNodeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/shutdown/delete_node/ShutdownDeleteNodeResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/shutdown/getnode/get_node.go b/typedapi/shutdown/getnode/get_node.go index 34be55ec4c..55533fdf9f 100644 --- a/typedapi/shutdown/getnode/get_node.go +++ b/typedapi/shutdown/getnode/get_node.go @@ -16,11 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieve status of a node or nodes that are currently marked as shutting -// down. 
Designed for indirect use by ECE/ESS and ECK. Direct use is not +// Get the shutdown status. +// +// Get information about nodes that are ready to be shut down, have shut down +// preparations still in progress, or have stalled. +// The API returns status information for each part of the shut down process. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. package getnode import ( @@ -77,11 +86,20 @@ func NewGetNodeFunc(tp elastictransport.Interface) NewGetNode { } } -// Retrieve status of a node or nodes that are currently marked as shutting -// down. Designed for indirect use by ECE/ESS and ECK. Direct use is not +// Get the shutdown status. +// +// Get information about nodes that are ready to be shut down, have shut down +// preparations still in progress, or have stalled. +// The API returns status information for each part of the shut down process. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not // supported. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current +// If the operator privileges feature is enabled, you must be an operator to use +// this API. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-shutdown.html func New(tp elastictransport.Interface) *GetNode { r := &GetNode{ transport: tp, @@ -324,15 +342,6 @@ func (r *GetNode) MasterTimeout(mastertimeout timeunit.TimeUnit) *GetNode { return r } -// Timeout Period to wait for a response. If no response is received before the timeout -// expires, the request fails and returns an error. 
-// API name: timeout -func (r *GetNode) Timeout(timeout timeunit.TimeUnit) *GetNode { - r.values.Set("timeout", timeout.String()) - - return r -} - // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/shutdown/getnode/response.go b/typedapi/shutdown/getnode/response.go index 166dff383a..a93977088c 100644 --- a/typedapi/shutdown/getnode/response.go +++ b/typedapi/shutdown/getnode/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getnode @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getnode // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L23-L27 type Response struct { Nodes []types.NodeShutdownStatus `json:"nodes"` } diff --git a/typedapi/shutdown/putnode/put_node.go b/typedapi/shutdown/putnode/put_node.go index efffd07859..82da5480d2 100644 --- a/typedapi/shutdown/putnode/put_node.go +++ b/typedapi/shutdown/putnode/put_node.go @@ -16,10 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. -// Direct use is not supported. 
+// Prepare a node to be shut down. +// +// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic +// Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// +// If you specify a node that is offline, it will be prepared for shut down when +// it rejoins the cluster. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. +// +// The API migrates ongoing tasks and index shards to other nodes as needed to +// prepare a node to be restarted or shut down and removed from the cluster. +// This ensures that Elasticsearch can be stopped safely with minimal disruption +// to the cluster. +// +// You must specify the type of shutdown: `restart`, `remove`, or `replace`. +// If a node is already being prepared for shutdown, you can use this API to +// change the shutdown type. +// +// IMPORTANT: This API does NOT terminate the Elasticsearch process. +// Monitor the node shutdown status to determine when it is safe to stop +// Elasticsearch. package putnode import ( @@ -84,10 +106,32 @@ func NewPutNodeFunc(tp elastictransport.Interface) NewPutNode { } } -// Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. -// Direct use is not supported. +// Prepare a node to be shut down. +// +// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic +// Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// +// If you specify a node that is offline, it will be prepared for shut down when +// it rejoins the cluster. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current +// The API migrates ongoing tasks and index shards to other nodes as needed to +// prepare a node to be restarted or shut down and removed from the cluster. 
+// This ensures that Elasticsearch can be stopped safely with minimal disruption +// to the cluster. +// +// You must specify the type of shutdown: `restart`, `remove`, or `replace`. +// If a node is already being prepared for shutdown, you can use this API to +// change the shutdown type. +// +// IMPORTANT: This API does NOT terminate the Elasticsearch process. +// Monitor the node shutdown status to determine when it is safe to stop +// Elasticsearch. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-shutdown.html func New(tp elastictransport.Interface) *PutNode { r := &PutNode{ transport: tp, @@ -95,8 +139,6 @@ func New(tp elastictransport.Interface) *PutNode { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -310,7 +352,10 @@ func (r *PutNode) Header(key, value string) *PutNode { return r } -// NodeId The node id of node to be shut down +// NodeId The node identifier. +// This parameter is not validated against the cluster's active nodes. +// This enables you to register a node for shut down while it is offline. +// No error is thrown if you specify an invalid node ID. // API Name: nodeid func (r *PutNode) _nodeid(nodeid string) *PutNode { r.paramSet |= nodeidMask @@ -319,8 +364,9 @@ func (r *PutNode) _nodeid(nodeid string) *PutNode { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
// API name: master_timeout func (r *PutNode) MasterTimeout(mastertimeout timeunit.TimeUnit) *PutNode { r.values.Set("master_timeout", mastertimeout.String()) @@ -328,8 +374,9 @@ func (r *PutNode) MasterTimeout(mastertimeout timeunit.TimeUnit) *PutNode { return r } -// Timeout Period to wait for a response. If no response is received before the timeout -// expires, the request fails and returns an error. +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout func (r *PutNode) Timeout(timeout timeunit.TimeUnit) *PutNode { r.values.Set("timeout", timeout.String()) @@ -381,7 +428,7 @@ func (r *PutNode) Pretty(pretty bool) *PutNode { return r } -// AllocationDelay Only valid if type is restart. +// Only valid if type is restart. // Controls how long Elasticsearch will wait for the node to restart and join // the cluster before reassigning its shards to other nodes. // This works the same as delaying allocation with the @@ -390,24 +437,32 @@ func (r *PutNode) Pretty(pretty bool) *PutNode { // delay, the longer of the two is used. // API name: allocation_delay func (r *PutNode) AllocationDelay(allocationdelay string) *PutNode { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.AllocationDelay = &allocationdelay return r } -// Reason A human-readable reason that the node is being shut down. +// A human-readable reason that the node is being shut down. // This field provides information for other cluster operators; it does not // affect the shut down process. // API name: reason func (r *PutNode) Reason(reason string) *PutNode { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Reason = reason return r } -// TargetNodeName Only valid if type is replace. +// Only valid if type is replace. 
// Specifies the name of the node that is replacing the node being shut down. // Shards from the shut down node are only allowed to be allocated to the target // node, and no other data will be allocated to the target node. @@ -415,13 +470,17 @@ func (r *PutNode) Reason(reason string) *PutNode { // watermarks or user attribute filtering rules. // API name: target_node_name func (r *PutNode) TargetNodeName(targetnodename string) *PutNode { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.TargetNodeName = &targetnodename return r } -// Type Valid values are restart, remove, or replace. +// Valid values are restart, remove, or replace. // Use restart when you need to temporarily shut down a node to perform an // upgrade, make configuration changes, or perform other maintenance. // Because the node is expected to rejoin the cluster, data is not migrated off @@ -435,7 +494,10 @@ func (r *PutNode) TargetNodeName(targetnodename string) *PutNode { // unassigned shards, and shrink may fail until the replacement is complete. // API name: type func (r *PutNode) Type(type_ type_.Type) *PutNode { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Type = type_ - return r } diff --git a/typedapi/shutdown/putnode/request.go b/typedapi/shutdown/putnode/request.go index e6c364043c..a22fe971b0 100644 --- a/typedapi/shutdown/putnode/request.go +++ b/typedapi/shutdown/putnode/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putnode @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putnode // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/shutdown/put_node/ShutdownPutNodeRequest.ts#L25-L76 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/shutdown/put_node/ShutdownPutNodeRequest.ts#L25-L108 type Request struct { // AllocationDelay Only valid if type is restart. diff --git a/typedapi/shutdown/putnode/response.go b/typedapi/shutdown/putnode/response.go index e4b376594c..abe7edc148 100644 --- a/typedapi/shutdown/putnode/response.go +++ b/typedapi/shutdown/putnode/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putnode // Response holds the response body struct for the package putnode // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/shutdown/put_node/ShutdownPutNodeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/shutdown/put_node/ShutdownPutNodeResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. 
On failure, an diff --git a/typedapi/simulate/ingest/ingest.go b/typedapi/simulate/ingest/ingest.go new file mode 100644 index 0000000000..940ef94c3a --- /dev/null +++ b/typedapi/simulate/ingest/ingest.go @@ -0,0 +1,564 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Simulate data ingestion. +// Run ingest pipelines against a set of provided documents, optionally with +// substitute pipeline definitions, to simulate ingesting data into an index. +// +// This API is meant to be used for troubleshooting or pipeline development, as +// it does not actually index any data into Elasticsearch. +// +// The API runs the default and final pipeline for that index against a set of +// documents provided in the body of the request. +// If a pipeline contains a reroute processor, it follows that reroute processor +// to the new index, running that index's pipelines as well the same way that a +// non-simulated ingest would. +// No data is indexed into Elasticsearch. 
+// Instead, the transformed document is returned, along with the list of +// pipelines that have been run and the name of the index where the document +// would have been indexed if this were not a simulation. +// The transformed document is validated against the mappings that would apply +// to this index, and any validation error is reported in the result. +// +// This API differs from the simulate pipeline API in that you specify a single +// pipeline for that API, and it runs only that one pipeline. +// The simulate pipeline API is more useful for developing a single pipeline, +// while the simulate ingest API is more useful for troubleshooting the +// interaction of the various pipelines that get applied when ingesting into an +// index. +// +// By default, the pipeline definitions that are currently in the system are +// used. +// However, you can supply substitute pipeline definitions in the body of the +// request. +// These will be used in place of the pipeline definitions that are already in +// the system. This can be used to replace existing pipeline definitions or to +// create new ones. The pipeline substitutions are used only within this +// request. +package ingest + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Ingest struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewIngest type alias for index. +type NewIngest func() *Ingest + +// NewIngestFunc returns a new instance of Ingest with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewIngestFunc(tp elastictransport.Interface) NewIngest { + return func() *Ingest { + n := New(tp) + + return n + } +} + +// Simulate data ingestion. +// Run ingest pipelines against a set of provided documents, optionally with +// substitute pipeline definitions, to simulate ingesting data into an index. +// +// This API is meant to be used for troubleshooting or pipeline development, as +// it does not actually index any data into Elasticsearch. +// +// The API runs the default and final pipeline for that index against a set of +// documents provided in the body of the request. +// If a pipeline contains a reroute processor, it follows that reroute processor +// to the new index, running that index's pipelines as well the same way that a +// non-simulated ingest would. +// No data is indexed into Elasticsearch. +// Instead, the transformed document is returned, along with the list of +// pipelines that have been run and the name of the index where the document +// would have been indexed if this were not a simulation. +// The transformed document is validated against the mappings that would apply +// to this index, and any validation error is reported in the result. +// +// This API differs from the simulate pipeline API in that you specify a single +// pipeline for that API, and it runs only that one pipeline. 
+// The simulate pipeline API is more useful for developing a single pipeline, +// while the simulate ingest API is more useful for troubleshooting the +// interaction of the various pipelines that get applied when ingesting into an +// index. +// +// By default, the pipeline definitions that are currently in the system are +// used. +// However, you can supply substitute pipeline definitions in the body of the +// request. +// These will be used in place of the pipeline definitions that are already in +// the system. This can be used to replace existing pipeline definitions or to +// create new ones. The pipeline substitutions are used only within this +// request. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/simulate-ingest-api.html +func New(tp elastictransport.Interface) *Ingest { + r := &Ingest{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Ingest) Raw(raw io.Reader) *Ingest { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Ingest) Request(req *Request) *Ingest { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Ingest) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Ingest: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("_simulate") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_simulate") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Ingest) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "simulate.ingest") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "simulate.ingest") + if reader := instrument.RecordRequestBody(ctx, "simulate.ingest", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "simulate.ingest") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Ingest query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a ingest.Response +func (r Ingest) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "simulate.ingest") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = 
json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Ingest headers map. +func (r *Ingest) Header(key, value string) *Ingest { + r.headers.Set(key, value) + + return r +} + +// Index The index to simulate ingesting into. +// This value can be overridden by specifying an index on each document. +// If you specify this parameter in the request path, it is used for any +// documents that do not explicitly specify an index argument. +// API Name: index +func (r *Ingest) Index(index string) *Ingest { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Pipeline The pipeline to use as the default pipeline. +// This value can be used to override the default pipeline of the index. +// API name: pipeline +func (r *Ingest) Pipeline(pipelinename string) *Ingest { + r.values.Set("pipeline", pipelinename) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Ingest) ErrorTrace(errortrace bool) *Ingest { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *Ingest) FilterPath(filterpaths ...string) *Ingest { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Ingest) Human(human bool) *Ingest { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Ingest) Pretty(pretty bool) *Ingest { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// A map of component template names to substitute component template definition +// objects. 
+// API name: component_template_substitutions +func (r *Ingest) ComponentTemplateSubstitutions(componenttemplatesubstitutions map[string]types.ComponentTemplateNode) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ComponentTemplateSubstitutions = componenttemplatesubstitutions + return r +} + +func (r *Ingest) AddComponentTemplateSubstitution(key string, value types.ComponentTemplateNodeVariant) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ComponentTemplateNode + if r.req.ComponentTemplateSubstitutions == nil { + r.req.ComponentTemplateSubstitutions = make(map[string]types.ComponentTemplateNode) + } else { + tmp = r.req.ComponentTemplateSubstitutions + } + + tmp[key] = *value.ComponentTemplateNodeCaster() + + r.req.ComponentTemplateSubstitutions = tmp + return r +} + +// Sample documents to test in the pipeline. +// API name: docs +func (r *Ingest) Docs(docs ...types.DocumentVariant) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docs { + + r.req.Docs = append(r.req.Docs, *v.DocumentCaster()) + + } + return r +} + +// A map of index template names to substitute index template definition +// objects. 
+// API name: index_template_substitutions +func (r *Ingest) IndexTemplateSubstitutions(indextemplatesubstitutions map[string]types.IndexTemplate) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexTemplateSubstitutions = indextemplatesubstitutions + return r +} + +func (r *Ingest) AddIndexTemplateSubstitution(key string, value types.IndexTemplateVariant) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.IndexTemplate + if r.req.IndexTemplateSubstitutions == nil { + r.req.IndexTemplateSubstitutions = make(map[string]types.IndexTemplate) + } else { + tmp = r.req.IndexTemplateSubstitutions + } + + tmp[key] = *value.IndexTemplateCaster() + + r.req.IndexTemplateSubstitutions = tmp + return r +} + +// API name: mapping_addition +func (r *Ingest) MappingAddition(mappingaddition types.TypeMappingVariant) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MappingAddition = mappingaddition.TypeMappingCaster() + + return r +} + +// Pipelines to test. +// If you don’t specify the `pipeline` request path parameter, this parameter is +// required. +// If you specify both this and the request path parameter, the API only uses +// the request path parameter. 
+// API name: pipeline_substitutions +func (r *Ingest) PipelineSubstitutions(pipelinesubstitutions map[string]types.IngestPipeline) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.PipelineSubstitutions = pipelinesubstitutions + return r +} + +func (r *Ingest) AddPipelineSubstitution(key string, value types.IngestPipelineVariant) *Ingest { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.IngestPipeline + if r.req.PipelineSubstitutions == nil { + r.req.PipelineSubstitutions = make(map[string]types.IngestPipeline) + } else { + tmp = r.req.PipelineSubstitutions + } + + tmp[key] = *value.IngestPipelineCaster() + + r.req.PipelineSubstitutions = tmp + return r +} diff --git a/typedapi/simulate/ingest/request.go b/typedapi/simulate/ingest/request.go new file mode 100644 index 0000000000..ef14da184f --- /dev/null +++ b/typedapi/simulate/ingest/request.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package ingest + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package ingest +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/simulate/ingest/SimulateIngestRequest.ts#L29-L100 +type Request struct { + + // ComponentTemplateSubstitutions A map of component template names to substitute component template definition + // objects. + ComponentTemplateSubstitutions map[string]types.ComponentTemplateNode `json:"component_template_substitutions,omitempty"` + // Docs Sample documents to test in the pipeline. + Docs []types.Document `json:"docs"` + // IndexTemplateSubstitutions A map of index template names to substitute index template definition + // objects. + IndexTemplateSubstitutions map[string]types.IndexTemplate `json:"index_template_substitutions,omitempty"` + MappingAddition *types.TypeMapping `json:"mapping_addition,omitempty"` + // PipelineSubstitutions Pipelines to test. + // If you don’t specify the `pipeline` request path parameter, this parameter is + // required. + // If you specify both this and the request path parameter, the API only uses + // the request path parameter. 
+ PipelineSubstitutions map[string]types.IngestPipeline `json:"pipeline_substitutions,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + ComponentTemplateSubstitutions: make(map[string]types.ComponentTemplateNode, 0), + IndexTemplateSubstitutions: make(map[string]types.IndexTemplate, 0), + PipelineSubstitutions: make(map[string]types.IngestPipeline, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Ingest request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/simulate/ingest/response.go b/typedapi/simulate/ingest/response.go new file mode 100644 index 0000000000..560002cd0f --- /dev/null +++ b/typedapi/simulate/ingest/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package ingest + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package ingest +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/simulate/ingest/SimulateIngestResponse.ts#L27-L29 +type Response struct { + Docs []types.SimulateIngestDocumentResult `json:"docs"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/slm/deletelifecycle/delete_lifecycle.go b/typedapi/slm/deletelifecycle/delete_lifecycle.go index 4305bf2d0e..4986a460af 100644 --- a/typedapi/slm/deletelifecycle/delete_lifecycle.go +++ b/typedapi/slm/deletelifecycle/delete_lifecycle.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes an existing snapshot lifecycle policy. +// Delete a policy. +// Delete a snapshot lifecycle policy definition. +// This operation prevents any future snapshots from being taken but does not +// cancel in-progress snapshots or remove previously-taken snapshots. package deletelifecycle import ( @@ -76,7 +79,10 @@ func NewDeleteLifecycleFunc(tp elastictransport.Interface) NewDeleteLifecycle { } } -// Deletes an existing snapshot lifecycle policy. +// Delete a policy. +// Delete a snapshot lifecycle policy definition. +// This operation prevents any future snapshots from being taken but does not +// cancel in-progress snapshots or remove previously-taken snapshots. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-delete-policy.html func New(tp elastictransport.Interface) *DeleteLifecycle { @@ -299,6 +305,26 @@ func (r *DeleteLifecycle) _policyid(policyid string) *DeleteLifecycle { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *DeleteLifecycle) MasterTimeout(duration string) *DeleteLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *DeleteLifecycle) Timeout(duration string) *DeleteLifecycle { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/slm/deletelifecycle/response.go b/typedapi/slm/deletelifecycle/response.go index 8502669388..75eff619de 100644 --- a/typedapi/slm/deletelifecycle/response.go +++ b/typedapi/slm/deletelifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletelifecycle // Response holds the response body struct for the package deletelifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/delete_lifecycle/DeleteSnapshotLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/delete_lifecycle/DeleteSnapshotLifecycleResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/slm/executelifecycle/execute_lifecycle.go b/typedapi/slm/executelifecycle/execute_lifecycle.go index a0f8adf5ed..cbee28d9b0 100644 --- a/typedapi/slm/executelifecycle/execute_lifecycle.go +++ b/typedapi/slm/executelifecycle/execute_lifecycle.go @@ -16,10 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Immediately creates a snapshot according to the lifecycle policy, without -// waiting for the scheduled time. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Run a policy. +// Immediately create a snapshot according to the snapshot lifecycle policy +// without waiting for the scheduled time. +// The snapshot policy is normally applied according to its schedule, but you +// might want to manually run a policy before performing an upgrade or other +// maintenance. 
package executelifecycle import ( @@ -77,8 +81,12 @@ func NewExecuteLifecycleFunc(tp elastictransport.Interface) NewExecuteLifecycle } } -// Immediately creates a snapshot according to the lifecycle policy, without -// waiting for the scheduled time. +// Run a policy. +// Immediately create a snapshot according to the snapshot lifecycle policy +// without waiting for the scheduled time. +// The snapshot policy is normally applied according to its schedule, but you +// might want to manually run a policy before performing an upgrade or other +// maintenance. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-lifecycle.html func New(tp elastictransport.Interface) *ExecuteLifecycle { @@ -303,6 +311,26 @@ func (r *ExecuteLifecycle) _policyid(policyid string) *ExecuteLifecycle { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *ExecuteLifecycle) MasterTimeout(duration string) *ExecuteLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *ExecuteLifecycle) Timeout(duration string) *ExecuteLifecycle { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/slm/executelifecycle/response.go b/typedapi/slm/executelifecycle/response.go index 288214a10d..86d2625c14 100644 --- a/typedapi/slm/executelifecycle/response.go +++ b/typedapi/slm/executelifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package executelifecycle // Response holds the response body struct for the package executelifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/execute_lifecycle/ExecuteSnapshotLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/execute_lifecycle/ExecuteSnapshotLifecycleResponse.ts#L22-L24 type Response struct { SnapshotName string `json:"snapshot_name"` } diff --git a/typedapi/slm/executeretention/execute_retention.go b/typedapi/slm/executeretention/execute_retention.go index 43c4c2c983..645cf272a4 100644 --- a/typedapi/slm/executeretention/execute_retention.go +++ b/typedapi/slm/executeretention/execute_retention.go @@ -16,10 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes any snapshots that are expired according to the policy's retention -// rules. +// Run a retention policy. +// Manually apply the retention policy to force immediate removal of snapshots +// that are expired according to the snapshot lifecycle policy retention rules. +// The retention policy is normally applied according to its schedule. package executeretention import ( @@ -69,8 +71,10 @@ func NewExecuteRetentionFunc(tp elastictransport.Interface) NewExecuteRetention } } -// Deletes any snapshots that are expired according to the policy's retention -// rules. +// Run a retention policy. 
+// Manually apply the retention policy to force immediate removal of snapshots +// that are expired according to the snapshot lifecycle policy retention rules. +// The retention policy is normally applied according to its schedule. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-retention.html func New(tp elastictransport.Interface) *ExecuteRetention { @@ -278,6 +282,26 @@ func (r *ExecuteRetention) Header(key, value string) *ExecuteRetention { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *ExecuteRetention) MasterTimeout(duration string) *ExecuteRetention { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *ExecuteRetention) Timeout(duration string) *ExecuteRetention { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/slm/executeretention/response.go b/typedapi/slm/executeretention/response.go index 7fe8deb5e6..383cd66fe9 100644 --- a/typedapi/slm/executeretention/response.go +++ b/typedapi/slm/executeretention/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package executeretention // Response holds the response body struct for the package executeretention // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/execute_retention/ExecuteRetentionResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/execute_retention/ExecuteRetentionResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/slm/getlifecycle/get_lifecycle.go b/typedapi/slm/getlifecycle/get_lifecycle.go index 759f78916f..b383c119a8 100644 --- a/typedapi/slm/getlifecycle/get_lifecycle.go +++ b/typedapi/slm/getlifecycle/get_lifecycle.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves one or more snapshot lifecycle policy definitions and information -// about the latest snapshot attempts. +// Get policy information. +// Get snapshot lifecycle policy definitions and information about the latest +// snapshot attempts. package getlifecycle import ( @@ -75,8 +76,9 @@ func NewGetLifecycleFunc(tp elastictransport.Interface) NewGetLifecycle { } } -// Retrieves one or more snapshot lifecycle policy definitions and information -// about the latest snapshot attempts. +// Get policy information. +// Get snapshot lifecycle policy definitions and information about the latest +// snapshot attempts. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-policy.html func New(tp elastictransport.Interface) *GetLifecycle { @@ -306,6 +308,26 @@ func (r *GetLifecycle) PolicyId(policyid string) *GetLifecycle { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetLifecycle) MasterTimeout(duration string) *GetLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *GetLifecycle) Timeout(duration string) *GetLifecycle { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/slm/getlifecycle/response.go b/typedapi/slm/getlifecycle/response.go index 3a49de23b4..780dd329a2 100644 --- a/typedapi/slm/getlifecycle/response.go +++ b/typedapi/slm/getlifecycle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getlifecycle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/get_lifecycle/GetSnapshotLifecycleResponse.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/get_lifecycle/GetSnapshotLifecycleResponse.ts#L24-L27 type Response map[string]types.SnapshotLifecycle diff --git a/typedapi/slm/getstats/get_stats.go b/typedapi/slm/getstats/get_stats.go index 34b145238d..4374716926 100644 --- a/typedapi/slm/getstats/get_stats.go +++ b/typedapi/slm/getstats/get_stats.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns global and policy-level statistics about actions taken by snapshot +// Get snapshot lifecycle management statistics. +// Get global and policy-level statistics about actions taken by snapshot // lifecycle management. package getstats @@ -69,7 +70,8 @@ func NewGetStatsFunc(tp elastictransport.Interface) NewGetStats { } } -// Returns global and policy-level statistics about actions taken by snapshot +// Get snapshot lifecycle management statistics. +// Get global and policy-level statistics about actions taken by snapshot // lifecycle management. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-stats.html @@ -278,6 +280,24 @@ func (r *GetStats) Header(key, value string) *GetStats { return r } +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *GetStats) MasterTimeout(duration string) *GetStats { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *GetStats) Timeout(duration string) *GetStats { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/slm/getstats/response.go b/typedapi/slm/getstats/response.go index f8819c7bb3..f3d8aeba25 100644 --- a/typedapi/slm/getstats/response.go +++ b/typedapi/slm/getstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getstats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/get_stats/GetSnapshotLifecycleStatsResponse.ts#L23-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/get_stats/GetSnapshotLifecycleStatsResponse.ts#L23-L36 type Response struct { PolicyStats []string `json:"policy_stats"` RetentionDeletionTime types.Duration `json:"retention_deletion_time"` diff --git a/typedapi/slm/getstatus/get_status.go b/typedapi/slm/getstatus/get_status.go index ceed906597..51f2d4cf79 100644 --- a/typedapi/slm/getstatus/get_status.go +++ b/typedapi/slm/getstatus/get_status.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves the status of snapshot lifecycle management (SLM). +// Get the snapshot lifecycle management status. package getstatus import ( @@ -68,7 +68,7 @@ func NewGetStatusFunc(tp elastictransport.Interface) NewGetStatus { } } -// Retrieves the status of snapshot lifecycle management (SLM). +// Get the snapshot lifecycle management status. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-status.html func New(tp elastictransport.Interface) *GetStatus { @@ -276,6 +276,28 @@ func (r *GetStatus) Header(key, value string) *GetStatus { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *GetStatus) MasterTimeout(duration string) *GetStatus { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: timeout +func (r *GetStatus) Timeout(duration string) *GetStatus { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/slm/getstatus/response.go b/typedapi/slm/getstatus/response.go index fd823cf65b..5898d39e3e 100644 --- a/typedapi/slm/getstatus/response.go +++ b/typedapi/slm/getstatus/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getstatus @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getstatus // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/get_status/GetSnapshotLifecycleManagementStatusResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/get_status/GetSnapshotLifecycleManagementStatusResponse.ts#L22-L24 type Response struct { OperationMode lifecycleoperationmode.LifecycleOperationMode `json:"operation_mode"` } diff --git a/typedapi/slm/putlifecycle/put_lifecycle.go b/typedapi/slm/putlifecycle/put_lifecycle.go index ec3fca38ed..6345078b91 100644 --- a/typedapi/slm/putlifecycle/put_lifecycle.go +++ b/typedapi/slm/putlifecycle/put_lifecycle.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates or updates a snapshot lifecycle policy. +// Create or update a policy. +// Create or update a snapshot lifecycle policy. +// If the policy already exists, this request increments the policy version. +// Only the latest version of a policy is stored. package putlifecycle import ( @@ -81,7 +84,10 @@ func NewPutLifecycleFunc(tp elastictransport.Interface) NewPutLifecycle { } } -// Creates or updates a snapshot lifecycle policy. +// Create or update a policy. +// Create or update a snapshot lifecycle policy. +// If the policy already exists, this request increments the policy version. 
+// Only the latest version of a policy is stored. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-put-policy.html func New(tp elastictransport.Interface) *PutLifecycle { @@ -91,8 +97,6 @@ func New(tp elastictransport.Interface) *PutLifecycle { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -306,7 +310,8 @@ func (r *PutLifecycle) Header(key, value string) *PutLifecycle { return r } -// PolicyId ID for the snapshot lifecycle policy you want to create or update. +// PolicyId The identifier for the snapshot lifecycle policy you want to create or +// update. // API Name: policyid func (r *PutLifecycle) _policyid(policyid string) *PutLifecycle { r.paramSet |= policyidMask @@ -315,8 +320,10 @@ func (r *PutLifecycle) _policyid(policyid string) *PutLifecycle { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *PutLifecycle) MasterTimeout(duration string) *PutLifecycle { r.values.Set("master_timeout", duration) @@ -324,8 +331,10 @@ func (r *PutLifecycle) MasterTimeout(duration string) *PutLifecycle { return r } -// Timeout Period to wait for a response. If no response is received before the timeout -// expires, the request fails and returns an error. +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. 
// API name: timeout func (r *PutLifecycle) Timeout(duration string) *PutLifecycle { r.values.Set("timeout", duration) @@ -377,49 +386,71 @@ func (r *PutLifecycle) Pretty(pretty bool) *PutLifecycle { return r } -// Config Configuration for each snapshot created by the policy. +// Configuration for each snapshot created by the policy. // API name: config -func (r *PutLifecycle) Config(config *types.Configuration) *PutLifecycle { +func (r *PutLifecycle) Config(config types.ConfigurationVariant) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Config = config + r.req.Config = config.ConfigurationCaster() return r } -// Name Name automatically assigned to each snapshot created by the policy. Date math +// Name automatically assigned to each snapshot created by the policy. Date math // is supported. To prevent conflicting snapshot names, a UUID is automatically // appended to each snapshot name. // API name: name func (r *PutLifecycle) Name(name string) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Name = &name return r } -// Repository Repository used to store snapshots created by this policy. This repository +// Repository used to store snapshots created by this policy. This repository // must exist prior to the policy’s creation. You can create a repository using // the snapshot repository API. // API name: repository func (r *PutLifecycle) Repository(repository string) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Repository = &repository return r } -// Retention Retention rules used to retain and delete snapshots created by the policy. +// Retention rules used to retain and delete snapshots created by the policy. 
// API name: retention -func (r *PutLifecycle) Retention(retention *types.Retention) *PutLifecycle { +func (r *PutLifecycle) Retention(retention types.RetentionVariant) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Retention = retention + r.req.Retention = retention.RetentionCaster() return r } -// Schedule Periodic or absolute schedule at which the policy creates snapshots. SLM +// Periodic or absolute schedule at which the policy creates snapshots. SLM // applies schedule changes immediately. // API name: schedule func (r *PutLifecycle) Schedule(cronexpression string) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Schedule = &cronexpression return r diff --git a/typedapi/slm/putlifecycle/request.go b/typedapi/slm/putlifecycle/request.go index e138ac4597..f04747b606 100644 --- a/typedapi/slm/putlifecycle/request.go +++ b/typedapi/slm/putlifecycle/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putlifecycle @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package putlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/put_lifecycle/PutSnapshotLifecycleRequest.ts#L26-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/put_lifecycle/PutSnapshotLifecycleRequest.ts#L26-L89 type Request struct { // Config Configuration for each snapshot created by the policy. 
diff --git a/typedapi/slm/putlifecycle/response.go b/typedapi/slm/putlifecycle/response.go index 195f5a4c44..59aed20cd6 100644 --- a/typedapi/slm/putlifecycle/response.go +++ b/typedapi/slm/putlifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putlifecycle // Response holds the response body struct for the package putlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/put_lifecycle/PutSnapshotLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/put_lifecycle/PutSnapshotLifecycleResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/slm/start/response.go b/typedapi/slm/start/response.go index 2fa2885785..0a0d224a8e 100644 --- a/typedapi/slm/start/response.go +++ b/typedapi/slm/start/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package start // Response holds the response body struct for the package start // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/start/StartSnapshotLifecycleManagementResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/start/StartSnapshotLifecycleManagementResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/slm/start/start.go b/typedapi/slm/start/start.go index 39c59fc8d7..b4b45e0c42 100644 --- a/typedapi/slm/start/start.go +++ b/typedapi/slm/start/start.go @@ -16,9 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Turns on snapshot lifecycle management (SLM). +// Start snapshot lifecycle management. +// Snapshot lifecycle management (SLM) starts automatically when a cluster is +// formed. +// Manually starting SLM is necessary only if it has been stopped using the stop +// SLM API. package start import ( @@ -68,7 +72,11 @@ func NewStartFunc(tp elastictransport.Interface) NewStart { } } -// Turns on snapshot lifecycle management (SLM). +// Start snapshot lifecycle management. +// Snapshot lifecycle management (SLM) starts automatically when a cluster is +// formed. +// Manually starting SLM is necessary only if it has been stopped using the stop +// SLM API. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-start.html func New(tp elastictransport.Interface) *Start { @@ -276,6 +284,28 @@ func (r *Start) Header(key, value string) *Start { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *Start) MasterTimeout(duration string) *Start { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: timeout +func (r *Start) Timeout(duration string) *Start { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/slm/stop/response.go b/typedapi/slm/stop/response.go index a09a3bf19b..4679b88c56 100644 --- a/typedapi/slm/stop/response.go +++ b/typedapi/slm/stop/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stop // Response holds the response body struct for the package stop // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/stop/StopSnapshotLifecycleManagementResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/stop/StopSnapshotLifecycleManagementResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/slm/stop/stop.go b/typedapi/slm/stop/stop.go index a691bf7346..d123dbad19 100644 --- a/typedapi/slm/stop/stop.go +++ b/typedapi/slm/stop/stop.go @@ -16,9 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Turns off snapshot lifecycle management (SLM). +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Stop snapshot lifecycle management. +// Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. +// This API is useful when you are performing maintenance on a cluster and need +// to prevent SLM from performing any actions on your data streams or indices. +// Stopping SLM does not stop any snapshots that are in progress. +// You can manually trigger snapshots with the run snapshot lifecycle policy API +// even if SLM is stopped. +// +// The API returns a response as soon as the request is acknowledged, but the +// plugin might continue to run until in-progress operations complete and it can +// be safely stopped. 
+// Use the get snapshot lifecycle management status API to see if SLM is +// running. package stop import ( @@ -68,7 +80,19 @@ func NewStopFunc(tp elastictransport.Interface) NewStop { } } -// Turns off snapshot lifecycle management (SLM). +// Stop snapshot lifecycle management. +// Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. +// This API is useful when you are performing maintenance on a cluster and need +// to prevent SLM from performing any actions on your data streams or indices. +// Stopping SLM does not stop any snapshots that are in progress. +// You can manually trigger snapshots with the run snapshot lifecycle policy API +// even if SLM is stopped. +// +// The API returns a response as soon as the request is acknowledged, but the +// plugin might continue to run until in-progress operations complete and it can +// be safely stopped. +// Use the get snapshot lifecycle management status API to see if SLM is +// running. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-stop.html func New(tp elastictransport.Interface) *Stop { @@ -276,6 +300,28 @@ func (r *Stop) Header(key, value string) *Stop { return r } +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *Stop) MasterTimeout(duration string) *Stop { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. 
+// API name: timeout +func (r *Stop) Timeout(duration string) *Stop { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/snapshot/cleanuprepository/cleanup_repository.go b/typedapi/snapshot/cleanuprepository/cleanup_repository.go index ed22a57246..29bf2ffd04 100644 --- a/typedapi/snapshot/cleanuprepository/cleanup_repository.go +++ b/typedapi/snapshot/cleanuprepository/cleanup_repository.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Triggers the review of a snapshot repository’s contents and deletes any stale -// data not referenced by existing snapshots. +// Clean up the snapshot repository. +// Trigger the review of the contents of a snapshot repository and delete any +// stale data not referenced by existing snapshots. package cleanuprepository import ( @@ -77,8 +78,9 @@ func NewCleanupRepositoryFunc(tp elastictransport.Interface) NewCleanupRepositor } } -// Triggers the review of a snapshot repository’s contents and deletes any stale -// data not referenced by existing snapshots. +// Clean up the snapshot repository. +// Trigger the review of the contents of a snapshot repository and delete any +// stale data not referenced by existing snapshots. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/clean-up-snapshot-repo-api.html func New(tp elastictransport.Interface) *CleanupRepository { diff --git a/typedapi/snapshot/cleanuprepository/response.go b/typedapi/snapshot/cleanuprepository/response.go index 0dfdc9d615..c613f80ef1 100644 --- a/typedapi/snapshot/cleanuprepository/response.go +++ b/typedapi/snapshot/cleanuprepository/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package cleanuprepository @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package cleanuprepository // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L22-L27 type Response struct { // Results Statistics for cleanup operations. diff --git a/typedapi/snapshot/clone/clone.go b/typedapi/snapshot/clone/clone.go index 515302e6e6..52681b0d77 100644 --- a/typedapi/snapshot/clone/clone.go +++ b/typedapi/snapshot/clone/clone.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Clones indices from one snapshot into another snapshot in the same -// repository. +// Clone a snapshot. 
+// Clone part of all of a snapshot into another snapshot in the same repository. package clone import ( @@ -92,10 +92,10 @@ func NewCloneFunc(tp elastictransport.Interface) NewClone { } } -// Clones indices from one snapshot into another snapshot in the same -// repository. +// Clone a snapshot. +// Clone part of all of a snapshot into another snapshot in the same repository. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/clone-snapshot-api.html func New(tp elastictransport.Interface) *Clone { r := &Clone{ transport: tp, @@ -103,8 +103,6 @@ func New(tp elastictransport.Interface) *Clone { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -365,13 +363,6 @@ func (r *Clone) MasterTimeout(duration string) *Clone { return r } -// API name: timeout -func (r *Clone) Timeout(duration string) *Clone { - r.values.Set("timeout", duration) - - return r -} - // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace @@ -418,6 +409,10 @@ func (r *Clone) Pretty(pretty bool) *Clone { // API name: indices func (r *Clone) Indices(indices string) *Clone { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Indices = indices diff --git a/typedapi/snapshot/clone/request.go b/typedapi/snapshot/clone/request.go index 12d9d4f650..202f81b558 100644 --- a/typedapi/snapshot/clone/request.go +++ b/typedapi/snapshot/clone/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clone @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package clone // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/clone/SnapshotCloneRequest.ts#L24-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/clone/SnapshotCloneRequest.ts#L24-L51 type Request struct { Indices string `json:"indices"` } diff --git a/typedapi/snapshot/clone/response.go b/typedapi/snapshot/clone/response.go index 3d59e3854f..a2bdc73512 100644 --- a/typedapi/snapshot/clone/response.go +++ b/typedapi/snapshot/clone/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clone // Response holds the response body struct for the package clone // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/clone/SnapshotCloneResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/clone/SnapshotCloneResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/snapshot/create/create.go b/typedapi/snapshot/create/create.go index 2e9d230e6e..5df1a8c9e2 100644 --- a/typedapi/snapshot/create/create.go +++ b/typedapi/snapshot/create/create.go @@ -16,9 +16,10 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a snapshot in a repository. +// Create a snapshot. +// Take a snapshot of a cluster or of data streams and indices. package create import ( @@ -86,9 +87,10 @@ func NewCreateFunc(tp elastictransport.Interface) NewCreate { } } -// Creates a snapshot in a repository. +// Create a snapshot. +// Take a snapshot of a cluster or of data streams and indices. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/create-snapshot-api.html func New(tp elastictransport.Interface) *Create { r := &Create{ transport: tp, @@ -96,8 +98,6 @@ func New(tp elastictransport.Interface) *Create { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -395,65 +395,97 @@ func (r *Create) Pretty(pretty bool) *Create { return r } -// FeatureStates Feature states to include in the snapshot. Each feature state includes one or +// Feature states to include in the snapshot. Each feature state includes one or // more system indices containing related data. You can view a list of eligible // features using the get features API. If `include_global_state` is `true`, all // current feature states are included by default. If `include_global_state` is // `false`, no feature states are included by default. 
// API name: feature_states func (r *Create) FeatureStates(featurestates ...string) *Create { - r.req.FeatureStates = featurestates + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range featurestates { + r.req.FeatureStates = append(r.req.FeatureStates, v) + + } return r } -// IgnoreUnavailable If `true`, the request ignores data streams and indices in `indices` that are +// If `true`, the request ignores data streams and indices in `indices` that are // missing or closed. If `false`, the request returns an error for any data // stream or index that is missing or closed. // API name: ignore_unavailable func (r *Create) IgnoreUnavailable(ignoreunavailable bool) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IgnoreUnavailable = &ignoreunavailable return r } -// IncludeGlobalState If `true`, the current cluster state is included in the snapshot. The cluster +// If `true`, the current cluster state is included in the snapshot. The cluster // state includes persistent cluster settings, composable index templates, // legacy index templates, ingest pipelines, and ILM policies. It also includes // data stored in system indices, such as Watches and task records (configurable // via `feature_states`). // API name: include_global_state func (r *Create) IncludeGlobalState(includeglobalstate bool) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IncludeGlobalState = &includeglobalstate return r } -// Indices Data streams and indices to include in the snapshot. Supports multi-target +// Data streams and indices to include in the snapshot. Supports multi-target // syntax. Includes all data streams and indices by default. 
// API name: indices func (r *Create) Indices(indices ...string) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Indices = indices return r } -// Metadata Optional metadata for the snapshot. May have any contents. Must be less than +// Optional metadata for the snapshot. May have any contents. Must be less than // 1024 bytes. This map is not automatically generated by Elasticsearch. // API name: metadata -func (r *Create) Metadata(metadata types.Metadata) *Create { - r.req.Metadata = metadata +func (r *Create) Metadata(metadata types.MetadataVariant) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } -// Partial If `true`, allows restoring a partial snapshot of indices with unavailable +// If `true`, allows restoring a partial snapshot of indices with unavailable // shards. Only shards that were successfully included in the snapshot will be // restored. All missing shards will be recreated as empty. If `false`, the // entire restore operation will fail if one or more indices included in the // snapshot do not have all primary shards available. // API name: partial func (r *Create) Partial(partial bool) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Partial = &partial return r diff --git a/typedapi/snapshot/create/request.go b/typedapi/snapshot/create/request.go index f797511a73..0be5829409 100644 --- a/typedapi/snapshot/create/request.go +++ b/typedapi/snapshot/create/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package create @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/create/SnapshotCreateRequest.ts#L24-L81 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/create/SnapshotCreateRequest.ts#L24-L92 type Request struct { // FeatureStates Feature states to include in the snapshot. Each feature state includes one or diff --git a/typedapi/snapshot/create/response.go b/typedapi/snapshot/create/response.go index e7244f02fe..30d030c21f 100644 --- a/typedapi/snapshot/create/response.go +++ b/typedapi/snapshot/create/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package create @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/create/SnapshotCreateResponse.ts#L22-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/create/SnapshotCreateResponse.ts#L22-L35 type Response struct { // Accepted Equals `true` if the snapshot was accepted. 
Present when the request had diff --git a/typedapi/snapshot/createrepository/create_repository.go b/typedapi/snapshot/createrepository/create_repository.go index 89ea223761..2b8232cb94 100644 --- a/typedapi/snapshot/createrepository/create_repository.go +++ b/typedapi/snapshot/createrepository/create_repository.go @@ -16,9 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Creates a repository. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Create or update a snapshot repository. +// IMPORTANT: If you are migrating searchable snapshots, the repository name +// must be identical in the source and destination clusters. +// To register a snapshot repository, the cluster's global metadata must be +// writeable. +// Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` +// and `cluster.blocks.read_only_allow_delete` settings) that prevent write +// access. package createrepository import ( @@ -81,7 +88,14 @@ func NewCreateRepositoryFunc(tp elastictransport.Interface) NewCreateRepository } } -// Creates a repository. +// Create or update a snapshot repository. +// IMPORTANT: If you are migrating searchable snapshots, the repository name +// must be identical in the source and destination clusters. +// To register a snapshot repository, the cluster's global metadata must be +// writeable. +// Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` +// and `cluster.blocks.read_only_allow_delete` settings) that prevent write +// access. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html func New(tp elastictransport.Interface) *CreateRepository { diff --git a/typedapi/snapshot/createrepository/request.go b/typedapi/snapshot/createrepository/request.go index e52452a64f..49b80a4fce 100644 --- a/typedapi/snapshot/createrepository/request.go +++ b/typedapi/snapshot/createrepository/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package createrepository @@ -26,5 +26,5 @@ import ( // Request holds the request body struct for the package createrepository // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/create_repository/SnapshotCreateRepositoryRequest.ts#L25-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/create_repository/SnapshotCreateRepositoryRequest.ts#L25-L54 type Request = types.Repository diff --git a/typedapi/snapshot/createrepository/response.go b/typedapi/snapshot/createrepository/response.go index 682e779f32..e4ddfda6de 100644 --- a/typedapi/snapshot/createrepository/response.go +++ b/typedapi/snapshot/createrepository/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package createrepository // Response holds the response body struct for the package createrepository // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/create_repository/SnapshotCreateRepositoryResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/create_repository/SnapshotCreateRepositoryResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/snapshot/delete/delete.go b/typedapi/snapshot/delete/delete.go index 769599b17e..6cff201809 100644 --- a/typedapi/snapshot/delete/delete.go +++ b/typedapi/snapshot/delete/delete.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes one or more snapshots. +// Delete snapshots. package delete import ( @@ -81,9 +81,9 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { } } -// Deletes one or more snapshots. +// Delete snapshots. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-snapshot-api.html func New(tp elastictransport.Interface) *Delete { r := &Delete{ transport: tp, diff --git a/typedapi/snapshot/delete/response.go b/typedapi/snapshot/delete/response.go index 5da26750a3..f1514b9a33 100644 --- a/typedapi/snapshot/delete/response.go +++ b/typedapi/snapshot/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/delete/SnapshotDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/delete/SnapshotDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/snapshot/deleterepository/delete_repository.go b/typedapi/snapshot/deleterepository/delete_repository.go index a03b8ce8a8..4bb294c762 100644 --- a/typedapi/snapshot/deleterepository/delete_repository.go +++ b/typedapi/snapshot/deleterepository/delete_repository.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes a repository. +// Delete snapshot repositories. 
+// When a repository is unregistered, Elasticsearch removes only the reference +// to the location where the repository is storing the snapshots. +// The snapshots themselves are left untouched and in place. package deleterepository import ( @@ -76,9 +79,12 @@ func NewDeleteRepositoryFunc(tp elastictransport.Interface) NewDeleteRepository } } -// Deletes a repository. +// Delete snapshot repositories. +// When a repository is unregistered, Elasticsearch removes only the reference +// to the location where the repository is storing the snapshots. +// The snapshots themselves are left untouched and in place. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-snapshot-repo-api.html func New(tp elastictransport.Interface) *DeleteRepository { r := &DeleteRepository{ transport: tp, diff --git a/typedapi/snapshot/deleterepository/response.go b/typedapi/snapshot/deleterepository/response.go index a5b43d306d..7a636a91ef 100644 --- a/typedapi/snapshot/deleterepository/response.go +++ b/typedapi/snapshot/deleterepository/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleterepository // Response holds the response body struct for the package deleterepository // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/delete_repository/SnapshotDeleteRepositoryResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/delete_repository/SnapshotDeleteRepositoryResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/snapshot/get/get.go b/typedapi/snapshot/get/get.go index 6f4b933f07..94b6775cf8 100644 --- a/typedapi/snapshot/get/get.go +++ b/typedapi/snapshot/get/get.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about a snapshot. +// Get snapshot information. package get import ( @@ -83,9 +83,9 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } } -// Returns information about a snapshot. +// Get snapshot information. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-snapshot-api.html func New(tp elastictransport.Interface) *Get { r := &Get{ transport: tp, diff --git a/typedapi/snapshot/get/response.go b/typedapi/snapshot/get/response.go index 37128a2d3b..f9ec860373 100644 --- a/typedapi/snapshot/get/response.go +++ b/typedapi/snapshot/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/get/SnapshotGetResponse.ts#L25-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/get/SnapshotGetResponse.ts#L25-L42 type Response struct { // Remaining The number of remaining snapshots that were not returned due to size limits diff --git a/typedapi/snapshot/getrepository/get_repository.go b/typedapi/snapshot/getrepository/get_repository.go index ea3ff56389..54b87f50b9 100644 --- a/typedapi/snapshot/getrepository/get_repository.go +++ b/typedapi/snapshot/getrepository/get_repository.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns information about a repository. +// Get snapshot repository information. 
package getrepository import ( @@ -74,9 +74,9 @@ func NewGetRepositoryFunc(tp elastictransport.Interface) NewGetRepository { } } -// Returns information about a repository. +// Get snapshot repository information. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-snapshot-repo-api.html func New(tp elastictransport.Interface) *GetRepository { r := &GetRepository{ transport: tp, diff --git a/typedapi/snapshot/getrepository/response.go b/typedapi/snapshot/getrepository/response.go index 4b13650385..e6066a8421 100644 --- a/typedapi/snapshot/getrepository/response.go +++ b/typedapi/snapshot/getrepository/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getrepository @@ -32,7 +32,7 @@ import ( // Response holds the response body struct for the package getrepository // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/get_repository/SnapshotGetRepositoryResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/get_repository/SnapshotGetRepositoryResponse.ts#L23-L25 type Response map[string]types.Repository diff --git a/typedapi/snapshot/repositoryanalyze/repository_analyze.go b/typedapi/snapshot/repositoryanalyze/repository_analyze.go new file mode 100644 index 0000000000..12bb6abce0 --- /dev/null +++ b/typedapi/snapshot/repositoryanalyze/repository_analyze.go @@ -0,0 +1,804 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Analyze a snapshot repository. +// Analyze the performance characteristics and any incorrect behaviour found in +// a repository. +// +// The response exposes implementation details of the analysis which may change +// from version to version. +// The response body format is therefore not considered stable and may be +// different in newer versions. +// +// There are a large number of third-party storage systems available, not all of +// which are suitable for use as a snapshot repository by Elasticsearch. +// Some storage systems behave incorrectly, or perform poorly, especially when +// accessed concurrently by multiple clients as the nodes of an Elasticsearch +// cluster do. This API performs a collection of read and write operations on +// your repository which are designed to detect incorrect behaviour and to +// measure the performance characteristics of your storage system. +// +// The default values for the parameters are deliberately low to reduce the +// impact of running an analysis inadvertently and to provide a sensible +// starting point for your investigations. 
+// Run your first analysis with the default parameter values to check for simple +// problems. +// If successful, run a sequence of increasingly large analyses until you +// encounter a failure or you reach a `blob_count` of at least `2000`, a +// `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, +// and a `register_operation_count` of at least `100`. +// Always specify a generous timeout, possibly `1h` or longer, to allow time for +// each analysis to run to completion. +// Perform the analyses using a multi-node cluster of a similar size to your +// production cluster so that it can detect any problems that only arise when +// the repository is accessed by many nodes at once. +// +// If the analysis fails, Elasticsearch detected that your repository behaved +// unexpectedly. +// This usually means you are using a third-party storage system with an +// incorrect or incompatible implementation of the API it claims to support. +// If so, this storage system is not suitable for use as a snapshot repository. +// You will need to work with the supplier of your storage system to address the +// incompatibilities that Elasticsearch detects. +// +// If the analysis is successful, the API returns details of the testing +// process, optionally including how long each operation took. +// You can use this information to determine the performance of your storage +// system. +// If any operation fails or returns an incorrect result, the API returns an +// error. +// If the API returns an error, it may not have removed all the data it wrote to +// the repository. +// The error will indicate the location of any leftover data and this path is +// also recorded in the Elasticsearch logs. +// You should verify that this location has been cleaned up correctly. +// If there is still leftover data at the specified location, you should +// manually remove it. 
+// +// If the connection from your client to Elasticsearch is closed while the +// client is waiting for the result of the analysis, the test is cancelled. +// Some clients are configured to close their connection if no response is +// received within a certain timeout. +// An analysis takes a long time to complete so you might need to relax any such +// client-side timeouts. +// On cancellation the analysis attempts to clean up the data it was writing, +// but it may not be able to remove it all. +// The path to the leftover data is recorded in the Elasticsearch logs. +// You should verify that this location has been cleaned up correctly. +// If there is still leftover data at the specified location, you should +// manually remove it. +// +// If the analysis is successful then it detected no incorrect behaviour, but +// this does not mean that correct behaviour is guaranteed. +// The analysis attempts to detect common bugs but it does not offer 100% +// coverage. +// Additionally, it does not test the following: +// +// * Your repository must perform durable writes. Once a blob has been written +// it must remain in place until it is deleted, even after a power loss or +// similar disaster. +// * Your repository must not suffer from silent data corruption. Once a blob +// has been written, its contents must remain unchanged until it is deliberately +// modified or deleted. +// * Your repository must behave correctly even if connectivity from the cluster +// is disrupted. Reads and writes may fail in this case, but they must not +// return incorrect results. +// +// IMPORTANT: An analysis writes a substantial amount of data to your repository +// and then reads it back again. +// This consumes bandwidth on the network between the cluster and the +// repository, and storage space and I/O bandwidth on the repository itself. +// You must ensure this load does not affect other users of these systems. 
+// Analyses respect the repository settings `max_snapshot_bytes_per_sec` and +// `max_restore_bytes_per_sec` if available and the cluster setting +// `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth +// they consume. +// +// NOTE: This API is intended for exploratory use by humans. You should expect +// the request parameters and the response format to vary in future versions. +// +// NOTE: Different versions of Elasticsearch may perform different checks for +// repository compatibility, with newer versions typically being stricter than +// older ones. +// A storage system that passes repository analysis with one version of +// Elasticsearch may fail with a different version. +// This indicates it behaves incorrectly in ways that the former version did not +// detect. +// You must work with the supplier of your storage system to address the +// incompatibilities detected by the repository analysis API in any version of +// Elasticsearch. +// +// NOTE: This API may not work correctly in a mixed-version cluster. +// +// *Implementation details* +// +// NOTE: This section of documentation describes how the repository analysis API +// works in this version of Elasticsearch, but you should expect the +// implementation to vary between versions. The request parameters and response +// format depend on details of the implementation so may also be different in +// newer versions. +// +// The analysis comprises a number of blob-level tasks, as set by the +// `blob_count` parameter and a number of compare-and-exchange operations on +// linearizable registers, as set by the `register_operation_count` parameter. +// These tasks are distributed over the data and master-eligible nodes in the +// cluster for execution. +// +// For most blob-level tasks, the executing node first writes a blob to the +// repository and then instructs some of the other nodes in the cluster to +// attempt to read the data it just wrote. 
+// The size of the blob is chosen randomly, according to the `max_blob_size` and +// `max_total_data_size` parameters. +// If any of these reads fails then the repository does not implement the +// necessary read-after-write semantics that Elasticsearch requires. +// +// For some blob-level tasks, the executing node will instruct some of its peers +// to attempt to read the data before the writing process completes. +// These reads are permitted to fail, but must not return partial data. +// If any read returns partial data then the repository does not implement the +// necessary atomicity semantics that Elasticsearch requires. +// +// For some blob-level tasks, the executing node will overwrite the blob while +// its peers are reading it. +// In this case the data read may come from either the original or the +// overwritten blob, but the read operation must not return partial data or a +// mix of data from the two blobs. +// If any of these reads returns partial data or a mix of the two blobs then the +// repository does not implement the necessary atomicity semantics that +// Elasticsearch requires for overwrites. +// +// The executing node will use a variety of different methods to write the blob. +// For instance, where applicable, it will use both single-part and multi-part +// uploads. +// Similarly, the reading nodes will use a variety of different methods to read +// the data back again. +// For instance they may read the entire blob from start to end or may read only +// a subset of the data. +// +// For some blob-level tasks, the executing node will cancel the write before it +// is complete. +// In this case, it still instructs some of the other nodes in the cluster to +// attempt to read the blob but all of these reads must fail to find the blob. +// +// Linearizable registers are special blobs that Elasticsearch manipulates using +// an atomic compare-and-exchange operation. 
+// This operation ensures correct and strongly-consistent behavior even when the +// blob is accessed by multiple nodes at the same time. +// The detailed implementation of the compare-and-exchange operation on +// linearizable registers varies by repository type. +// Repository analysis verifies that uncontended compare-and-exchange +// operations on a linearizable register blob always succeed. +// Repository analysis also verifies that contended operations either succeed or +// report the contention but do not return incorrect results. +// If an operation fails due to contention, Elasticsearch retries the operation +// until it succeeds. +// Most of the compare-and-exchange operations performed by repository analysis +// atomically increment a counter which is represented as an 8-byte blob. +// Some operations also verify the behavior on small blobs with sizes other than +// 8 bytes. +package repositoryanalyze + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + repositoryMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RepositoryAnalyze struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + repository string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRepositoryAnalyze type alias for index. +type NewRepositoryAnalyze func(repository string) *RepositoryAnalyze + +// NewRepositoryAnalyzeFunc returns a new instance of RepositoryAnalyze with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewRepositoryAnalyzeFunc(tp elastictransport.Interface) NewRepositoryAnalyze { + return func(repository string) *RepositoryAnalyze { + n := New(tp) + + n._repository(repository) + + return n + } +} + +// Analyze a snapshot repository. +// Analyze the performance characteristics and any incorrect behaviour found in +// a repository. +// +// The response exposes implementation details of the analysis which may change +// from version to version. +// The response body format is therefore not considered stable and may be +// different in newer versions. +// +// There are a large number of third-party storage systems available, not all of +// which are suitable for use as a snapshot repository by Elasticsearch. +// Some storage systems behave incorrectly, or perform poorly, especially when +// accessed concurrently by multiple clients as the nodes of an Elasticsearch +// cluster do. This API performs a collection of read and write operations on +// your repository which are designed to detect incorrect behaviour and to +// measure the performance characteristics of your storage system. +// +// The default values for the parameters are deliberately low to reduce the +// impact of running an analysis inadvertently and to provide a sensible +// starting point for your investigations. +// Run your first analysis with the default parameter values to check for simple +// problems. +// If successful, run a sequence of increasingly large analyses until you +// encounter a failure or you reach a `blob_count` of at least `2000`, a +// `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, +// and a `register_operation_count` of at least `100`. +// Always specify a generous timeout, possibly `1h` or longer, to allow time for +// each analysis to run to completion. 
+// Perform the analyses using a multi-node cluster of a similar size to your +// production cluster so that it can detect any problems that only arise when +// the repository is accessed by many nodes at once. +// +// If the analysis fails, Elasticsearch detected that your repository behaved +// unexpectedly. +// This usually means you are using a third-party storage system with an +// incorrect or incompatible implementation of the API it claims to support. +// If so, this storage system is not suitable for use as a snapshot repository. +// You will need to work with the supplier of your storage system to address the +// incompatibilities that Elasticsearch detects. +// +// If the analysis is successful, the API returns details of the testing +// process, optionally including how long each operation took. +// You can use this information to determine the performance of your storage +// system. +// If any operation fails or returns an incorrect result, the API returns an +// error. +// If the API returns an error, it may not have removed all the data it wrote to +// the repository. +// The error will indicate the location of any leftover data and this path is +// also recorded in the Elasticsearch logs. +// You should verify that this location has been cleaned up correctly. +// If there is still leftover data at the specified location, you should +// manually remove it. +// +// If the connection from your client to Elasticsearch is closed while the +// client is waiting for the result of the analysis, the test is cancelled. +// Some clients are configured to close their connection if no response is +// received within a certain timeout. +// An analysis takes a long time to complete so you might need to relax any such +// client-side timeouts. +// On cancellation the analysis attempts to clean up the data it was writing, +// but it may not be able to remove it all. +// The path to the leftover data is recorded in the Elasticsearch logs. 
+// You should verify that this location has been cleaned up correctly. +// If there is still leftover data at the specified location, you should +// manually remove it. +// +// If the analysis is successful then it detected no incorrect behaviour, but +// this does not mean that correct behaviour is guaranteed. +// The analysis attempts to detect common bugs but it does not offer 100% +// coverage. +// Additionally, it does not test the following: +// +// * Your repository must perform durable writes. Once a blob has been written +// it must remain in place until it is deleted, even after a power loss or +// similar disaster. +// * Your repository must not suffer from silent data corruption. Once a blob +// has been written, its contents must remain unchanged until it is deliberately +// modified or deleted. +// * Your repository must behave correctly even if connectivity from the cluster +// is disrupted. Reads and writes may fail in this case, but they must not +// return incorrect results. +// +// IMPORTANT: An analysis writes a substantial amount of data to your repository +// and then reads it back again. +// This consumes bandwidth on the network between the cluster and the +// repository, and storage space and I/O bandwidth on the repository itself. +// You must ensure this load does not affect other users of these systems. +// Analyses respect the repository settings `max_snapshot_bytes_per_sec` and +// `max_restore_bytes_per_sec` if available and the cluster setting +// `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth +// they consume. +// +// NOTE: This API is intended for exploratory use by humans. You should expect +// the request parameters and the response format to vary in future versions. +// +// NOTE: Different versions of Elasticsearch may perform different checks for +// repository compatibility, with newer versions typically being stricter than +// older ones. 
+// A storage system that passes repository analysis with one version of +// Elasticsearch may fail with a different version. +// This indicates it behaves incorrectly in ways that the former version did not +// detect. +// You must work with the supplier of your storage system to address the +// incompatibilities detected by the repository analysis API in any version of +// Elasticsearch. +// +// NOTE: This API may not work correctly in a mixed-version cluster. +// +// *Implementation details* +// +// NOTE: This section of documentation describes how the repository analysis API +// works in this version of Elasticsearch, but you should expect the +// implementation to vary between versions. The request parameters and response +// format depend on details of the implementation so may also be different in +// newer versions. +// +// The analysis comprises a number of blob-level tasks, as set by the +// `blob_count` parameter and a number of compare-and-exchange operations on +// linearizable registers, as set by the `register_operation_count` parameter. +// These tasks are distributed over the data and master-eligible nodes in the +// cluster for execution. +// +// For most blob-level tasks, the executing node first writes a blob to the +// repository and then instructs some of the other nodes in the cluster to +// attempt to read the data it just wrote. +// The size of the blob is chosen randomly, according to the `max_blob_size` and +// `max_total_data_size` parameters. +// If any of these reads fails then the repository does not implement the +// necessary read-after-write semantics that Elasticsearch requires. +// +// For some blob-level tasks, the executing node will instruct some of its peers +// to attempt to read the data before the writing process completes. +// These reads are permitted to fail, but must not return partial data. 
+// If any read returns partial data then the repository does not implement the
+// necessary atomicity semantics that Elasticsearch requires.
+//
+// For some blob-level tasks, the executing node will overwrite the blob while
+// its peers are reading it.
+// In this case the data read may come from either the original or the
+// overwritten blob, but the read operation must not return partial data or a
+// mix of data from the two blobs.
+// If any of these reads returns partial data or a mix of the two blobs then the
+// repository does not implement the necessary atomicity semantics that
+// Elasticsearch requires for overwrites.
+//
+// The executing node will use a variety of different methods to write the blob.
+// For instance, where applicable, it will use both single-part and multi-part
+// uploads.
+// Similarly, the reading nodes will use a variety of different methods to read
+// the data back again.
+// For instance they may read the entire blob from start to end or may read only
+// a subset of the data.
+//
+// For some blob-level tasks, the executing node will cancel the write before it
+// is complete.
+// In this case, it still instructs some of the other nodes in the cluster to
+// attempt to read the blob but all of these reads must fail to find the blob.
+//
+// Linearizable registers are special blobs that Elasticsearch manipulates using
+// an atomic compare-and-exchange operation.
+// This operation ensures correct and strongly-consistent behavior even when the
+// blob is accessed by multiple nodes at the same time.
+// The detailed implementation of the compare-and-exchange operation on
+// linearizable registers varies by repository type.
+// Repository analysis verifies that uncontended compare-and-exchange
+// operations on a linearizable register blob always succeed.
+// Repository analysis also verifies that contended operations either succeed or
+// report the contention but do not return incorrect results.
+// If an operation fails due to contention, Elasticsearch retries the operation +// until it succeeds. +// Most of the compare-and-exchange operations performed by repository analysis +// atomically increment a counter which is represented as an 8-byte blob. +// Some operations also verify the behavior on small blobs with sizes other than +// 8 bytes. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/repo-analysis-api.html +func New(tp elastictransport.Interface) *RepositoryAnalyze { + r := &RepositoryAnalyze{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *RepositoryAnalyze) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + path.WriteString("_analyze") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + 
return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r RepositoryAnalyze) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "snapshot.repository_analyze") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.repository_analyze") + if reader := instrument.RecordRequestBody(ctx, "snapshot.repository_analyze", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.repository_analyze") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the RepositoryAnalyze query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a repositoryanalyze.Response +func (r RepositoryAnalyze) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.repository_analyze") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := 
NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r RepositoryAnalyze) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.repository_analyze") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the RepositoryAnalyze query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the RepositoryAnalyze headers map. +func (r *RepositoryAnalyze) Header(key, value string) *RepositoryAnalyze { + r.headers.Set(key, value) + + return r +} + +// Repository The name of the repository. +// API Name: repository +func (r *RepositoryAnalyze) _repository(repository string) *RepositoryAnalyze { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// BlobCount The total number of blobs to write to the repository during the test. +// For realistic experiments, you should set it to at least `2000`. +// API name: blob_count +func (r *RepositoryAnalyze) BlobCount(blobcount int) *RepositoryAnalyze { + r.values.Set("blob_count", strconv.Itoa(blobcount)) + + return r +} + +// Concurrency The number of operations to run concurrently during the test. 
+// API name: concurrency +func (r *RepositoryAnalyze) Concurrency(concurrency int) *RepositoryAnalyze { + r.values.Set("concurrency", strconv.Itoa(concurrency)) + + return r +} + +// Detailed Indicates whether to return detailed results, including timing information +// for every operation performed during the analysis. +// If false, it returns only a summary of the analysis. +// API name: detailed +func (r *RepositoryAnalyze) Detailed(detailed bool) *RepositoryAnalyze { + r.values.Set("detailed", strconv.FormatBool(detailed)) + + return r +} + +// EarlyReadNodeCount The number of nodes on which to perform an early read operation while writing +// each blob. +// Early read operations are only rarely performed. +// API name: early_read_node_count +func (r *RepositoryAnalyze) EarlyReadNodeCount(earlyreadnodecount int) *RepositoryAnalyze { + r.values.Set("early_read_node_count", strconv.Itoa(earlyreadnodecount)) + + return r +} + +// MaxBlobSize The maximum size of a blob to be written during the test. +// For realistic experiments, you should set it to at least `2gb`. +// API name: max_blob_size +func (r *RepositoryAnalyze) MaxBlobSize(bytesize string) *RepositoryAnalyze { + r.values.Set("max_blob_size", bytesize) + + return r +} + +// MaxTotalDataSize An upper limit on the total size of all the blobs written during the test. +// For realistic experiments, you should set it to at least `1tb`. +// API name: max_total_data_size +func (r *RepositoryAnalyze) MaxTotalDataSize(bytesize string) *RepositoryAnalyze { + r.values.Set("max_total_data_size", bytesize) + + return r +} + +// RareActionProbability The probability of performing a rare action such as an early read, an +// overwrite, or an aborted write on each blob. 
+// API name: rare_action_probability +func (r *RepositoryAnalyze) RareActionProbability(rareactionprobability string) *RepositoryAnalyze { + r.values.Set("rare_action_probability", rareactionprobability) + + return r +} + +// RarelyAbortWrites Indicates whether to rarely cancel writes before they complete. +// API name: rarely_abort_writes +func (r *RepositoryAnalyze) RarelyAbortWrites(rarelyabortwrites bool) *RepositoryAnalyze { + r.values.Set("rarely_abort_writes", strconv.FormatBool(rarelyabortwrites)) + + return r +} + +// ReadNodeCount The number of nodes on which to read a blob after writing. +// API name: read_node_count +func (r *RepositoryAnalyze) ReadNodeCount(readnodecount int) *RepositoryAnalyze { + r.values.Set("read_node_count", strconv.Itoa(readnodecount)) + + return r +} + +// RegisterOperationCount The minimum number of linearizable register operations to perform in total. +// For realistic experiments, you should set it to at least `100`. +// API name: register_operation_count +func (r *RepositoryAnalyze) RegisterOperationCount(registeroperationcount int) *RepositoryAnalyze { + r.values.Set("register_operation_count", strconv.Itoa(registeroperationcount)) + + return r +} + +// Seed The seed for the pseudo-random number generator used to generate the list of +// operations performed during the test. +// To repeat the same set of operations in multiple experiments, use the same +// seed in each experiment. +// Note that the operations are performed concurrently so might not always +// happen in the same order on each run. +// API name: seed +func (r *RepositoryAnalyze) Seed(seed int) *RepositoryAnalyze { + r.values.Set("seed", strconv.Itoa(seed)) + + return r +} + +// Timeout The period of time to wait for the test to complete. +// If no response is received before the timeout expires, the test is cancelled +// and returns an error. 
+// API name: timeout +func (r *RepositoryAnalyze) Timeout(duration string) *RepositoryAnalyze { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *RepositoryAnalyze) ErrorTrace(errortrace bool) *RepositoryAnalyze { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *RepositoryAnalyze) FilterPath(filterpaths ...string) *RepositoryAnalyze { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *RepositoryAnalyze) Human(human bool) *RepositoryAnalyze { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *RepositoryAnalyze) Pretty(pretty bool) *RepositoryAnalyze { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/snapshot/repositoryanalyze/response.go b/typedapi/snapshot/repositoryanalyze/response.go new file mode 100644 index 0000000000..4560ff9e5a --- /dev/null +++ b/typedapi/snapshot/repositoryanalyze/response.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package repositoryanalyze + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package repositoryanalyze +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L24-L108 +type Response struct { + + // BlobCount The number of blobs written to the repository during the test. + BlobCount int `json:"blob_count"` + // BlobPath The path in the repository under which all the blobs were written during the + // test. + BlobPath string `json:"blob_path"` + // Concurrency The number of write operations performed concurrently during the test. + Concurrency int `json:"concurrency"` + // CoordinatingNode The node that coordinated the analysis and performed the final cleanup. + CoordinatingNode types.SnapshotNodeInfo `json:"coordinating_node"` + // DeleteElapsed The time it took to delete all the blobs in the container. 
+ DeleteElapsed types.Duration `json:"delete_elapsed"` + // DeleteElapsedNanos The time it took to delete all the blobs in the container, in nanoseconds. + DeleteElapsedNanos int64 `json:"delete_elapsed_nanos"` + // Details A description of every read and write operation performed during the test. + Details types.DetailsInfo `json:"details"` + // EarlyReadNodeCount The limit on the number of nodes on which early read operations were + // performed after writing each blob. + EarlyReadNodeCount int `json:"early_read_node_count"` + // IssuesDetected A list of correctness issues detected, which is empty if the API succeeded. + // It is included to emphasize that a successful response does not guarantee + // correct behaviour in future. + IssuesDetected []string `json:"issues_detected"` + // ListingElapsed The time it took to retrieve a list of all the blobs in the container. + ListingElapsed types.Duration `json:"listing_elapsed"` + // ListingElapsedNanos The time it took to retrieve a list of all the blobs in the container, in + // nanoseconds. + ListingElapsedNanos int64 `json:"listing_elapsed_nanos"` + // MaxBlobSize The limit on the size of a blob written during the test. + MaxBlobSize types.ByteSize `json:"max_blob_size"` + // MaxBlobSizeBytes The limit, in bytes, on the size of a blob written during the test. + MaxBlobSizeBytes int64 `json:"max_blob_size_bytes"` + // MaxTotalDataSize The limit on the total size of all blob written during the test. + MaxTotalDataSize types.ByteSize `json:"max_total_data_size"` + // MaxTotalDataSizeBytes The limit, in bytes, on the total size of all blob written during the test. + MaxTotalDataSizeBytes int64 `json:"max_total_data_size_bytes"` + // RareActionProbability The probability of performing rare actions during the test. + RareActionProbability types.Float64 `json:"rare_action_probability"` + // ReadNodeCount The limit on the number of nodes on which read operations were performed + // after writing each blob. 
+ ReadNodeCount int `json:"read_node_count"` + // Repository The name of the repository that was the subject of the analysis. + Repository string `json:"repository"` + // Seed The seed for the pseudo-random number generator used to generate the + // operations used during the test. + Seed int64 `json:"seed"` + // Summary A collection of statistics that summarize the results of the test. + Summary types.SummaryInfo `json:"summary"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/snapshot/repositoryverifyintegrity/repository_verify_integrity.go b/typedapi/snapshot/repositoryverifyintegrity/repository_verify_integrity.go new file mode 100644 index 0000000000..aabff4bb66 --- /dev/null +++ b/typedapi/snapshot/repositoryverifyintegrity/repository_verify_integrity.go @@ -0,0 +1,506 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Verify the repository integrity. +// Verify the integrity of the contents of a snapshot repository. 
+//
+// This API enables you to perform a comprehensive check of the contents of a
+// repository, looking for any anomalies in its data or metadata which might
+// prevent you from restoring snapshots from the repository or which might cause
+// future snapshot create or delete operations to fail.
+//
+// If you suspect the integrity of the contents of one of your snapshot
+// repositories, cease all write activity to this repository immediately, set
+// its `read_only` option to `true`, and use this API to verify its integrity.
+// Until you do so:
+//
+// * It may not be possible to restore some snapshots from this repository.
+// * Searchable snapshots may report errors when searched or may have unassigned
+// shards.
+// * Taking snapshots into this repository may fail or may appear to succeed but
+// have created a snapshot which cannot be restored.
+// * Deleting snapshots from this repository may fail or may appear to succeed
+// but leave the underlying data on disk.
+// * Continuing to write to the repository while it is in an invalid state may
+// cause additional damage to its contents.
+//
+// If the API finds any problems with the integrity of the contents of your
+// repository, Elasticsearch will not be able to repair the damage.
+// The only way to bring the repository back into a fully working state after
+// its contents have been damaged is by restoring its contents from a repository
+// backup which was taken before the damage occurred.
+// You must also identify what caused the damage and take action to prevent it
+// from happening again.
+//
+// If you cannot restore a repository backup, register a new repository and use
+// this for all future snapshot operations.
+// In some cases it may be possible to recover some of the contents of a damaged +// repository, either by restoring as many of its snapshots as needed and taking +// new snapshots of the restored data, or by using the reindex API to copy data +// from any searchable snapshots mounted from the damaged repository. +// +// Avoid all operations which write to the repository while the verify +// repository integrity API is running. +// If something changes the repository contents while an integrity verification +// is running then Elasticsearch may incorrectly report having detected some +// anomalies in its contents due to the concurrent writes. +// It may also incorrectly fail to report some anomalies that the concurrent +// writes prevented it from detecting. +// +// NOTE: This API is intended for exploratory use by humans. You should expect +// the request parameters and the response format to vary in future versions. +// +// NOTE: This API may not work correctly in a mixed-version cluster. +package repositoryverifyintegrity + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + repositoryMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RepositoryVerifyIntegrity struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + repository string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRepositoryVerifyIntegrity type alias for index. 
+type NewRepositoryVerifyIntegrity func(repository string) *RepositoryVerifyIntegrity + +// NewRepositoryVerifyIntegrityFunc returns a new instance of RepositoryVerifyIntegrity with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRepositoryVerifyIntegrityFunc(tp elastictransport.Interface) NewRepositoryVerifyIntegrity { + return func(repository string) *RepositoryVerifyIntegrity { + n := New(tp) + + n._repository(repository) + + return n + } +} + +// Verify the repository integrity. +// Verify the integrity of the contents of a snapshot repository. +// +// This API enables you to perform a comprehensive check of the contents of a +// repository, looking for any anomalies in its data or metadata which might +// prevent you from restoring snapshots from the repository or which might cause +// future snapshot create or delete operations to fail. +// +// If you suspect the integrity of the contents of one of your snapshot +// repositories, cease all write activity to this repository immediately, set +// its `read_only` option to `true`, and use this API to verify its integrity. +// Until you do so: +// +// * It may not be possible to restore some snapshots from this repository. +// * Searchable snapshots may report errors when searched or may have unassigned +// shards. +// * Taking snapshots into this repository may fail or may appear to succeed but +// have created a snapshot which cannot be restored. +// * Deleting snapshots from this repository may fail or may appear to succeed +// but leave the underlying data on disk. +// * Continuing to write to the repository while it is in an invalid state may +// causing additional damage to its contents. +// +// If the API finds any problems with the integrity of the contents of your +// repository, Elasticsearch will not be able to repair the damage. 
+// The only way to bring the repository back into a fully working state after +// its contents have been damaged is by restoring its contents from a repository +// backup which was taken before the damage occurred. +// You must also identify what caused the damage and take action to prevent it +// from happening again. +// +// If you cannot restore a repository backup, register a new repository and use +// this for all future snapshot operations. +// In some cases it may be possible to recover some of the contents of a damaged +// repository, either by restoring as many of its snapshots as needed and taking +// new snapshots of the restored data, or by using the reindex API to copy data +// from any searchable snapshots mounted from the damaged repository. +// +// Avoid all operations which write to the repository while the verify +// repository integrity API is running. +// If something changes the repository contents while an integrity verification +// is running then Elasticsearch may incorrectly report having detected some +// anomalies in its contents due to the concurrent writes. +// It may also incorrectly fail to report some anomalies that the concurrent +// writes prevented it from detecting. +// +// NOTE: This API is intended for exploratory use by humans. You should expect +// the request parameters and the response format to vary in future versions. +// +// NOTE: This API may not work correctly in a mixed-version cluster. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/verify-repo-integrity-api.html +func New(tp elastictransport.Interface) *RepositoryVerifyIntegrity { + r := &RepositoryVerifyIntegrity{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *RepositoryVerifyIntegrity) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + path.WriteString("_verify_integrity") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r RepositoryVerifyIntegrity) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "snapshot.repository_verify_integrity") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.repository_verify_integrity") + if reader := instrument.RecordRequestBody(ctx, "snapshot.repository_verify_integrity", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.repository_verify_integrity") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the RepositoryVerifyIntegrity query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a repositoryverifyintegrity.Response +func (r RepositoryVerifyIntegrity) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.repository_verify_integrity") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := new(Response) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return *response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r RepositoryVerifyIntegrity) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.repository_verify_integrity") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the RepositoryVerifyIntegrity query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the RepositoryVerifyIntegrity headers map. 
+func (r *RepositoryVerifyIntegrity) Header(key, value string) *RepositoryVerifyIntegrity { + r.headers.Set(key, value) + + return r +} + +// Repository A repository name +// API Name: repository +func (r *RepositoryVerifyIntegrity) _repository(repository string) *RepositoryVerifyIntegrity { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// MetaThreadPoolConcurrency Number of threads to use for reading metadata +// API name: meta_thread_pool_concurrency +func (r *RepositoryVerifyIntegrity) MetaThreadPoolConcurrency(metathreadpoolconcurrency int) *RepositoryVerifyIntegrity { + r.values.Set("meta_thread_pool_concurrency", strconv.Itoa(metathreadpoolconcurrency)) + + return r +} + +// BlobThreadPoolConcurrency Number of threads to use for reading blob contents +// API name: blob_thread_pool_concurrency +func (r *RepositoryVerifyIntegrity) BlobThreadPoolConcurrency(blobthreadpoolconcurrency int) *RepositoryVerifyIntegrity { + r.values.Set("blob_thread_pool_concurrency", strconv.Itoa(blobthreadpoolconcurrency)) + + return r +} + +// SnapshotVerificationConcurrency Number of snapshots to verify concurrently +// API name: snapshot_verification_concurrency +func (r *RepositoryVerifyIntegrity) SnapshotVerificationConcurrency(snapshotverificationconcurrency int) *RepositoryVerifyIntegrity { + r.values.Set("snapshot_verification_concurrency", strconv.Itoa(snapshotverificationconcurrency)) + + return r +} + +// IndexVerificationConcurrency Number of indices to verify concurrently +// API name: index_verification_concurrency +func (r *RepositoryVerifyIntegrity) IndexVerificationConcurrency(indexverificationconcurrency int) *RepositoryVerifyIntegrity { + r.values.Set("index_verification_concurrency", strconv.Itoa(indexverificationconcurrency)) + + return r +} + +// IndexSnapshotVerificationConcurrency Number of snapshots to verify concurrently within each index +// API name: index_snapshot_verification_concurrency +func (r 
*RepositoryVerifyIntegrity) IndexSnapshotVerificationConcurrency(indexsnapshotverificationconcurrency int) *RepositoryVerifyIntegrity { + r.values.Set("index_snapshot_verification_concurrency", strconv.Itoa(indexsnapshotverificationconcurrency)) + + return r +} + +// MaxFailedShardSnapshots Maximum permitted number of failed shard snapshots +// API name: max_failed_shard_snapshots +func (r *RepositoryVerifyIntegrity) MaxFailedShardSnapshots(maxfailedshardsnapshots int) *RepositoryVerifyIntegrity { + r.values.Set("max_failed_shard_snapshots", strconv.Itoa(maxfailedshardsnapshots)) + + return r +} + +// VerifyBlobContents Whether to verify the contents of individual blobs +// API name: verify_blob_contents +func (r *RepositoryVerifyIntegrity) VerifyBlobContents(verifyblobcontents bool) *RepositoryVerifyIntegrity { + r.values.Set("verify_blob_contents", strconv.FormatBool(verifyblobcontents)) + + return r +} + +// MaxBytesPerSec Rate limit for individual blob verification +// API name: max_bytes_per_sec +func (r *RepositoryVerifyIntegrity) MaxBytesPerSec(maxbytespersec string) *RepositoryVerifyIntegrity { + r.values.Set("max_bytes_per_sec", maxbytespersec) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *RepositoryVerifyIntegrity) ErrorTrace(errortrace bool) *RepositoryVerifyIntegrity { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *RepositoryVerifyIntegrity) FilterPath(filterpaths ...string) *RepositoryVerifyIntegrity { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *RepositoryVerifyIntegrity) Human(human bool) *RepositoryVerifyIntegrity { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *RepositoryVerifyIntegrity) Pretty(pretty bool) *RepositoryVerifyIntegrity { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/snapshot/repositoryverifyintegrity/response.go b/typedapi/snapshot/repositoryverifyintegrity/response.go new file mode 100644 index 0000000000..8da98d8329 --- /dev/null +++ b/typedapi/snapshot/repositoryverifyintegrity/response.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package repositoryverifyintegrity + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package repositoryverifyintegrity +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/repository_verify_integrity/SnapshotRepositoryVerifyIntegrityResponse.ts#L22-L24 + +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/typedapi/snapshot/restore/request.go b/typedapi/snapshot/restore/request.go index fca982a86f..6afc2e3075 100644 --- a/typedapi/snapshot/restore/request.go +++ b/typedapi/snapshot/restore/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package restore @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package restore // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/restore/SnapshotRestoreRequest.ts#L25-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/restore/SnapshotRestoreRequest.ts#L25-L78 type Request struct { FeatureStates []string `json:"feature_states,omitempty"` IgnoreIndexSettings []string `json:"ignore_index_settings,omitempty"` diff --git a/typedapi/snapshot/restore/response.go b/typedapi/snapshot/restore/response.go index 07f342828b..e1089f006b 100644 --- a/typedapi/snapshot/restore/response.go +++ b/typedapi/snapshot/restore/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package restore @@ -26,9 +26,10 @@ import ( // Response holds the response body struct for the package restore // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/restore/SnapshotRestoreResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/restore/SnapshotRestoreResponse.ts#L23-L28 type Response struct { - Snapshot types.SnapshotRestore `json:"snapshot"` + Accepted *bool `json:"accepted,omitempty"` + Snapshot *types.SnapshotRestore `json:"snapshot,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/snapshot/restore/restore.go b/typedapi/snapshot/restore/restore.go index 91970035d5..591f6e7ba9 100644 --- a/typedapi/snapshot/restore/restore.go +++ b/typedapi/snapshot/restore/restore.go @@ -16,9 +16,35 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Restores a snapshot. +// Restore a snapshot. +// Restore a snapshot of a cluster or data streams and indices. +// +// You can restore a snapshot only to a running cluster with an elected master +// node. +// The snapshot repository must be registered and available to the cluster. +// The snapshot and cluster versions must be compatible. +// +// To restore a snapshot, the cluster's global metadata must be writable. Ensure +// there are't any cluster blocks that prevent writes. The restore operation +// ignores index blocks. 
+// +// Before you restore a data stream, ensure the cluster contains a matching +// index template with data streams enabled. To check, use the index management +// feature in Kibana or the get index template API: +// +// ``` +// GET +// _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream +// ``` +// +// If no such template exists, you can create one or restore a cluster state +// that contains one. Without a matching index template, a data stream can't +// roll over or create backing indices. +// +// If your snapshot contains data from App Search or Workplace Search, you must +// restore the Enterprise Search encryption key before you restore the snapshot. package restore import ( @@ -86,9 +112,35 @@ func NewRestoreFunc(tp elastictransport.Interface) NewRestore { } } -// Restores a snapshot. +// Restore a snapshot. +// Restore a snapshot of a cluster or data streams and indices. +// +// You can restore a snapshot only to a running cluster with an elected master +// node. +// The snapshot repository must be registered and available to the cluster. +// The snapshot and cluster versions must be compatible. +// +// To restore a snapshot, the cluster's global metadata must be writable. Ensure +// there are't any cluster blocks that prevent writes. The restore operation +// ignores index blocks. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// Before you restore a data stream, ensure the cluster contains a matching +// index template with data streams enabled. To check, use the index management +// feature in Kibana or the get index template API: +// +// ``` +// GET +// _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream +// ``` +// +// If no such template exists, you can create one or restore a cluster state +// that contains one. 
Without a matching index template, a data stream can't +// roll over or create backing indices. +// +// If your snapshot contains data from App Search or Workplace Search, you must +// restore the Enterprise Search encryption key before you restore the snapshot. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/restore-snapshot-api.html func New(tp elastictransport.Interface) *Restore { r := &Restore{ transport: tp, @@ -96,8 +148,6 @@ func New(tp elastictransport.Interface) *Restore { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -397,20 +447,39 @@ func (r *Restore) Pretty(pretty bool) *Restore { // API name: feature_states func (r *Restore) FeatureStates(featurestates ...string) *Restore { - r.req.FeatureStates = featurestates + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range featurestates { + r.req.FeatureStates = append(r.req.FeatureStates, v) + + } return r } // API name: ignore_index_settings func (r *Restore) IgnoreIndexSettings(ignoreindexsettings ...string) *Restore { - r.req.IgnoreIndexSettings = ignoreindexsettings + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ignoreindexsettings { + + r.req.IgnoreIndexSettings = append(r.req.IgnoreIndexSettings, v) + } return r } // API name: ignore_unavailable func (r *Restore) IgnoreUnavailable(ignoreunavailable bool) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IgnoreUnavailable = &ignoreunavailable return r @@ -418,6 +487,11 @@ func (r *Restore) IgnoreUnavailable(ignoreunavailable bool) *Restore { // API name: include_aliases func (r *Restore) IncludeAliases(includealiases bool) *Restore { + // Initialize the request if it is not already 
initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IncludeAliases = &includealiases return r @@ -425,21 +499,35 @@ func (r *Restore) IncludeAliases(includealiases bool) *Restore { // API name: include_global_state func (r *Restore) IncludeGlobalState(includeglobalstate bool) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IncludeGlobalState = &includeglobalstate return r } // API name: index_settings -func (r *Restore) IndexSettings(indexsettings *types.IndexSettings) *Restore { +func (r *Restore) IndexSettings(indexsettings types.IndexSettingsVariant) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.IndexSettings = indexsettings + r.req.IndexSettings = indexsettings.IndexSettingsCaster() return r } // API name: indices func (r *Restore) Indices(indices ...string) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Indices = indices return r @@ -447,6 +535,11 @@ func (r *Restore) Indices(indices ...string) *Restore { // API name: partial func (r *Restore) Partial(partial bool) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Partial = &partial return r @@ -454,6 +547,10 @@ func (r *Restore) Partial(partial bool) *Restore { // API name: rename_pattern func (r *Restore) RenamePattern(renamepattern string) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.RenamePattern = &renamepattern @@ -462,6 +559,10 @@ func (r *Restore) RenamePattern(renamepattern string) *Restore { // API name: rename_replacement func (r *Restore) RenameReplacement(renamereplacement string) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() 
+ } r.req.RenameReplacement = &renamereplacement diff --git a/typedapi/snapshot/status/response.go b/typedapi/snapshot/status/response.go index 05dc9b2a5c..e6393b9e7a 100644 --- a/typedapi/snapshot/status/response.go +++ b/typedapi/snapshot/status/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package status @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package status // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/status/SnapshotStatusResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/status/SnapshotStatusResponse.ts#L22-L24 type Response struct { Snapshots []types.Status `json:"snapshots"` } diff --git a/typedapi/snapshot/status/status.go b/typedapi/snapshot/status/status.go index 721012df19..354379658c 100644 --- a/typedapi/snapshot/status/status.go +++ b/typedapi/snapshot/status/status.go @@ -16,9 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Returns information about the status of a snapshot. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Get the snapshot status. +// Get a detailed description of the current state for each shard participating +// in the snapshot. +// Note that this API should be used only to obtain detailed shard-level +// information for ongoing snapshots. 
+// If this detail is not needed or you want to obtain information about one or +// more existing snapshots, use the get snapshot API. +// +// WARNING: Using the API to return the status of any snapshots other than +// currently running snapshots can be expensive. +// The API requires a read from the repository for each shard in each snapshot. +// For example, if you have 100 snapshots with 1,000 shards each, an API request +// that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 +// shards). +// +// Depending on the latency of your storage, such requests can take an extremely +// long time to return results. +// These requests can also tax machine resources and, when using cloud storage, +// incur high processing costs. package status import ( @@ -77,9 +95,27 @@ func NewStatusFunc(tp elastictransport.Interface) NewStatus { } } -// Returns information about the status of a snapshot. +// Get the snapshot status. +// Get a detailed description of the current state for each shard participating +// in the snapshot. +// Note that this API should be used only to obtain detailed shard-level +// information for ongoing snapshots. +// If this detail is not needed or you want to obtain information about one or +// more existing snapshots, use the get snapshot API. +// +// WARNING: Using the API to return the status of any snapshots other than +// currently running snapshots can be expensive. +// The API requires a read from the repository for each shard in each snapshot. +// For example, if you have 100 snapshots with 1,000 shards each, an API request +// that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 +// shards). +// +// Depending on the latency of your storage, such requests can take an extremely +// long time to return results. +// These requests can also tax machine resources and, when using cloud storage, +// incur high processing costs. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-snapshot-status-api.html func New(tp elastictransport.Interface) *Status { r := &Status{ transport: tp, diff --git a/typedapi/snapshot/verifyrepository/response.go b/typedapi/snapshot/verifyrepository/response.go index dadb6fcf64..7483ac148a 100644 --- a/typedapi/snapshot/verifyrepository/response.go +++ b/typedapi/snapshot/verifyrepository/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package verifyrepository @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package verifyrepository // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L23-L25 type Response struct { Nodes map[string]types.CompactNodeInfo `json:"nodes"` } diff --git a/typedapi/snapshot/verifyrepository/verify_repository.go b/typedapi/snapshot/verifyrepository/verify_repository.go index 79b9726c3c..3f19d3363d 100644 --- a/typedapi/snapshot/verifyrepository/verify_repository.go +++ b/typedapi/snapshot/verifyrepository/verify_repository.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Verifies a repository. +// Verify a snapshot repository. +// Check for common misconfigurations in a snapshot repository. package verifyrepository import ( @@ -76,9 +77,10 @@ func NewVerifyRepositoryFunc(tp elastictransport.Interface) NewVerifyRepository } } -// Verifies a repository. +// Verify a snapshot repository. +// Check for common misconfigurations in a snapshot repository. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/verify-snapshot-repo-api.html func New(tp elastictransport.Interface) *VerifyRepository { r := &VerifyRepository{ transport: tp, diff --git a/typedapi/sql/clearcursor/clear_cursor.go b/typedapi/sql/clearcursor/clear_cursor.go index 843fdc45ea..2097f77f0c 100644 --- a/typedapi/sql/clearcursor/clear_cursor.go +++ b/typedapi/sql/clearcursor/clear_cursor.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Clears the SQL cursor +// Clear an SQL search cursor. package clearcursor import ( @@ -73,7 +73,7 @@ func NewClearCursorFunc(tp elastictransport.Interface) NewClearCursor { } } -// Clears the SQL cursor +// Clear an SQL search cursor. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-sql-cursor-api.html func New(tp elastictransport.Interface) *ClearCursor { @@ -83,8 +83,6 @@ func New(tp elastictransport.Interface) *ClearCursor { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -336,9 +334,13 @@ func (r *ClearCursor) Pretty(pretty bool) *ClearCursor { return r } -// Cursor Cursor to clear. +// Cursor to clear. // API name: cursor func (r *ClearCursor) Cursor(cursor string) *ClearCursor { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Cursor = cursor diff --git a/typedapi/sql/clearcursor/request.go b/typedapi/sql/clearcursor/request.go index aea365d1ad..976d9c5c7a 100644 --- a/typedapi/sql/clearcursor/request.go +++ b/typedapi/sql/clearcursor/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clearcursor @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package clearcursor // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/sql/clear_cursor/ClearSqlCursorRequest.ts#L22-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/sql/clear_cursor/ClearSqlCursorRequest.ts#L22-L42 type Request struct { // Cursor Cursor to clear. 
diff --git a/typedapi/sql/clearcursor/response.go b/typedapi/sql/clearcursor/response.go index a5c0566930..d726c34f4e 100644 --- a/typedapi/sql/clearcursor/response.go +++ b/typedapi/sql/clearcursor/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package clearcursor // Response holds the response body struct for the package clearcursor // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/sql/clear_cursor/ClearSqlCursorResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/sql/clear_cursor/ClearSqlCursorResponse.ts#L20-L22 type Response struct { Succeeded bool `json:"succeeded"` } diff --git a/typedapi/sql/deleteasync/delete_async.go b/typedapi/sql/deleteasync/delete_async.go index 3be8790e80..1990f9dee0 100644 --- a/typedapi/sql/deleteasync/delete_async.go +++ b/typedapi/sql/deleteasync/delete_async.go @@ -16,10 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes an async SQL search or a stored synchronous SQL search. If the search -// is still running, the API cancels it. +// Delete an async SQL search. +// Delete an async SQL search or a stored synchronous SQL search. +// If the search is still running, the API cancels it. 
+// +// If the Elasticsearch security features are enabled, only the following users +// can use this API to delete a search: +// +// * Users with the `cancel_task` cluster privilege. +// * The user who first submitted the search. package deleteasync import ( @@ -77,8 +84,15 @@ func NewDeleteAsyncFunc(tp elastictransport.Interface) NewDeleteAsync { } } -// Deletes an async SQL search or a stored synchronous SQL search. If the search -// is still running, the API cancels it. +// Delete an async SQL search. +// Delete an async SQL search or a stored synchronous SQL search. +// If the search is still running, the API cancels it. +// +// If the Elasticsearch security features are enabled, only the following users +// can use this API to delete a search: +// +// * Users with the `cancel_task` cluster privilege. +// * The user who first submitted the search. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-async-sql-search-api.html func New(tp elastictransport.Interface) *DeleteAsync { @@ -294,7 +308,7 @@ func (r *DeleteAsync) Header(key, value string) *DeleteAsync { return r } -// Id Identifier for the search. +// Id The identifier for the search. // API Name: id func (r *DeleteAsync) _id(id string) *DeleteAsync { r.paramSet |= idMask diff --git a/typedapi/sql/deleteasync/response.go b/typedapi/sql/deleteasync/response.go index a98b55d998..fcbde4fb58 100644 --- a/typedapi/sql/deleteasync/response.go +++ b/typedapi/sql/deleteasync/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deleteasync // Response holds the response body struct for the package deleteasync // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/sql/delete_async/SqlDeleteAsyncResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/sql/delete_async/SqlDeleteAsyncResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/sql/getasync/get_async.go b/typedapi/sql/getasync/get_async.go index c12ec591f9..d8fb188617 100644 --- a/typedapi/sql/getasync/get_async.go +++ b/typedapi/sql/getasync/get_async.go @@ -16,10 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the current status and available results for an async SQL search or -// stored synchronous SQL search +// Get async SQL search results. +// Get the current status and available results for an async SQL search or +// stored synchronous SQL search. +// +// If the Elasticsearch security features are enabled, only the user who first +// submitted the SQL search can retrieve the search using this API. package getasync import ( @@ -77,8 +81,12 @@ func NewGetAsyncFunc(tp elastictransport.Interface) NewGetAsync { } } -// Returns the current status and available results for an async SQL search or -// stored synchronous SQL search +// Get async SQL search results. 
+// Get the current status and available results for an async SQL search or +// stored synchronous SQL search. +// +// If the Elasticsearch security features are enabled, only the user who first +// submitted the SQL search can retrieve the search using this API. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-api.html func New(tp elastictransport.Interface) *GetAsync { @@ -292,7 +300,7 @@ func (r *GetAsync) Header(key, value string) *GetAsync { return r } -// Id Identifier for the search. +// Id The identifier for the search. // API Name: id func (r *GetAsync) _id(id string) *GetAsync { r.paramSet |= idMask @@ -301,8 +309,8 @@ func (r *GetAsync) _id(id string) *GetAsync { return r } -// Delimiter Separator for CSV results. The API only supports this parameter for CSV -// responses. +// Delimiter The separator for CSV results. +// The API supports this parameter only for CSV responses. // API name: delimiter func (r *GetAsync) Delimiter(delimiter string) *GetAsync { r.values.Set("delimiter", delimiter) @@ -310,9 +318,9 @@ func (r *GetAsync) Delimiter(delimiter string) *GetAsync { return r } -// Format Format for the response. You must specify a format using this parameter or -// the -// Accept HTTP header. If you specify both, the API uses this parameter. +// Format The format for the response. +// You must specify a format using this parameter or the `Accept` HTTP header. +// If you specify both, the API uses this parameter. // API name: format func (r *GetAsync) Format(format string) *GetAsync { r.values.Set("format", format) @@ -320,8 +328,8 @@ func (r *GetAsync) Format(format string) *GetAsync { return r } -// KeepAlive Retention period for the search and its results. Defaults -// to the `keep_alive` period for the original SQL search. +// KeepAlive The retention period for the search and its results. +// It defaults to the `keep_alive` period for the original SQL search. 
// API name: keep_alive func (r *GetAsync) KeepAlive(duration string) *GetAsync { r.values.Set("keep_alive", duration) @@ -329,8 +337,9 @@ func (r *GetAsync) KeepAlive(duration string) *GetAsync { return r } -// WaitForCompletionTimeout Period to wait for complete results. Defaults to no timeout, -// meaning the request waits for complete search results. +// WaitForCompletionTimeout The period to wait for complete results. +// It defaults to no timeout, meaning the request waits for complete search +// results. // API name: wait_for_completion_timeout func (r *GetAsync) WaitForCompletionTimeout(duration string) *GetAsync { r.values.Set("wait_for_completion_timeout", duration) diff --git a/typedapi/sql/getasync/response.go b/typedapi/sql/getasync/response.go index a8a77853ca..ddc42ed345 100644 --- a/typedapi/sql/getasync/response.go +++ b/typedapi/sql/getasync/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getasync @@ -28,35 +28,36 @@ import ( // Response holds the response body struct for the package getasync // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/sql/get_async/SqlGetAsyncResponse.ts#L23-L60 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/sql/get_async/SqlGetAsyncResponse.ts#L23-L60 type Response struct { // Columns Column headings for the search results. Each object is a column. Columns []types.Column `json:"columns,omitempty"` - // Cursor Cursor for the next set of paginated results. For CSV, TSV, and - // TXT responses, this value is returned in the `Cursor` HTTP header. 
+ // Cursor The cursor for the next set of paginated results. + // For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP + // header. Cursor *string `json:"cursor,omitempty"` - // Id Identifier for the search. This value is only returned for async and saved - // synchronous searches. For CSV, TSV, and TXT responses, this value is returned - // in the `Async-ID` HTTP header. + // Id Identifier for the search. + // This value is returned only for async and saved synchronous searches. + // For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` + // HTTP header. Id string `json:"id"` - // IsPartial If `true`, the response does not contain complete search results. If - // `is_partial` - // is `true` and `is_running` is `true`, the search is still running. If - // `is_partial` - // is `true` but `is_running` is `false`, the results are partial due to a - // failure or - // timeout. This value is only returned for async and saved synchronous - // searches. + // IsPartial If `true`, the response does not contain complete search results. + // If `is_partial` is `true` and `is_running` is `true`, the search is still + // running. + // If `is_partial` is `true` but `is_running` is `false`, the results are + // partial due to a failure or timeout. + // This value is returned only for async and saved synchronous searches. // For CSV, TSV, and TXT responses, this value is returned in the // `Async-partial` HTTP header. IsPartial bool `json:"is_partial"` - // IsRunning If `true`, the search is still running. If false, the search has finished. - // This value is only returned for async and saved synchronous searches. For - // CSV, TSV, and TXT responses, this value is returned in the `Async-partial` - // HTTP header. + // IsRunning If `true`, the search is still running. + // If `false`, the search has finished. + // This value is returned only for async and saved synchronous searches. 
+ // For CSV, TSV, and TXT responses, this value is returned in the + // `Async-partial` HTTP header. IsRunning bool `json:"is_running"` - // Rows Values for the search results. + // Rows The values for the search results. Rows [][]json.RawMessage `json:"rows"` } diff --git a/typedapi/sql/getasyncstatus/get_async_status.go b/typedapi/sql/getasyncstatus/get_async_status.go index e2f0e00aab..426068b1d8 100644 --- a/typedapi/sql/getasyncstatus/get_async_status.go +++ b/typedapi/sql/getasyncstatus/get_async_status.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Returns the current status of an async SQL search or a stored synchronous SQL -// search +// Get the async SQL search status. +// Get the current status of an async SQL search or a stored synchronous SQL +// search. package getasyncstatus import ( @@ -77,8 +78,9 @@ func NewGetAsyncStatusFunc(tp elastictransport.Interface) NewGetAsyncStatus { } } -// Returns the current status of an async SQL search or a stored synchronous SQL -// search +// Get the async SQL search status. +// Get the current status of an async SQL search or a stored synchronous SQL +// search. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-status-api.html func New(tp elastictransport.Interface) *GetAsyncStatus { @@ -294,7 +296,7 @@ func (r *GetAsyncStatus) Header(key, value string) *GetAsyncStatus { return r } -// Id Identifier for the search. +// Id The identifier for the search. 
// API Name: id func (r *GetAsyncStatus) _id(id string) *GetAsyncStatus { r.paramSet |= idMask diff --git a/typedapi/sql/getasyncstatus/response.go b/typedapi/sql/getasyncstatus/response.go index 98d61fdd89..53f5afb64b 100644 --- a/typedapi/sql/getasyncstatus/response.go +++ b/typedapi/sql/getasyncstatus/response.go @@ -16,36 +16,34 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getasyncstatus // Response holds the response body struct for the package getasyncstatus // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/sql/get_async_status/SqlGetAsyncStatusResponse.ts#L23-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/sql/get_async_status/SqlGetAsyncStatusResponse.ts#L23-L55 type Response struct { - // CompletionStatus HTTP status code for the search. The API only returns this property for - // completed searches. + // CompletionStatus The HTTP status code for the search. + // The API returns this property only for completed searches. CompletionStatus *uint `json:"completion_status,omitempty"` - // ExpirationTimeInMillis Timestamp, in milliseconds since the Unix epoch, when Elasticsearch will - // delete - // the search and its results, even if the search is still running. + // ExpirationTimeInMillis The timestamp, in milliseconds since the Unix epoch, when Elasticsearch will + // delete the search and its results, even if the search is still running. ExpirationTimeInMillis int64 `json:"expiration_time_in_millis"` - // Id Identifier for the search. + // Id The identifier for the search. 
Id string `json:"id"` - // IsPartial If `true`, the response does not contain complete search results. If - // `is_partial` - // is `true` and `is_running` is `true`, the search is still running. If - // `is_partial` - // is `true` but `is_running` is `false`, the results are partial due to a - // failure or - // timeout. + // IsPartial If `true`, the response does not contain complete search results. + // If `is_partial` is `true` and `is_running` is `true`, the search is still + // running. + // If `is_partial` is `true` but `is_running` is `false`, the results are + // partial due to a failure or timeout. IsPartial bool `json:"is_partial"` - // IsRunning If `true`, the search is still running. If `false`, the search has finished. + // IsRunning If `true`, the search is still running. + // If `false`, the search has finished. IsRunning bool `json:"is_running"` - // StartTimeInMillis Timestamp, in milliseconds since the Unix epoch, when the search started. - // The API only returns this property for running searches. + // StartTimeInMillis The timestamp, in milliseconds since the Unix epoch, when the search started. + // The API returns this property only for running searches. StartTimeInMillis int64 `json:"start_time_in_millis"` } diff --git a/typedapi/sql/query/query.go b/typedapi/sql/query/query.go index 66db077a6c..a2fec850ae 100644 --- a/typedapi/sql/query/query.go +++ b/typedapi/sql/query/query.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Executes a SQL request +// Get SQL search results. +// Run an SQL request. 
package query import ( @@ -35,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sqlformat" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -73,7 +75,8 @@ func NewQueryFunc(tp elastictransport.Interface) NewQuery { } } -// Executes a SQL request +// Get SQL search results. +// Run an SQL request. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-search-api.html func New(tp elastictransport.Interface) *Query { @@ -83,8 +86,6 @@ func New(tp elastictransport.Interface) *Query { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -290,10 +291,13 @@ func (r *Query) Header(key, value string) *Query { return r } -// Format Format for the response. +// Format The format for the response. +// You can also specify a format using the `Accept` HTTP header. +// If you specify both this parameter and the `Accept` HTTP header, this +// parameter takes precedence. // API name: format -func (r *Query) Format(format string) *Query { - r.values.Set("format", format) +func (r *Query) Format(format sqlformat.SqlFormat) *Query { + r.values.Set("format", format.String()) return r } @@ -342,147 +346,266 @@ func (r *Query) Pretty(pretty bool) *Query { return r } -// Catalog Default catalog (cluster) for queries. If unspecified, the queries execute on -// the data in the local cluster only. +// If `true`, the response has partial results when there are shard request +// timeouts or shard failures. +// If `false`, the API returns an error with no partial results. 
+// API name: allow_partial_search_results +func (r *Query) AllowPartialSearchResults(allowpartialsearchresults bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowPartialSearchResults = &allowpartialsearchresults + + return r +} + +// The default catalog (cluster) for queries. +// If unspecified, the queries execute on the data in the local cluster only. // API name: catalog func (r *Query) Catalog(catalog string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Catalog = &catalog return r } -// Columnar If true, the results in a columnar fashion: one row represents all the values -// of a certain column from the current page of results. +// If `true`, the results are in a columnar fashion: one row represents all the +// values of a certain column from the current page of results. +// The API supports this parameter only for CBOR, JSON, SMILE, and YAML +// responses. // API name: columnar func (r *Query) Columnar(columnar bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Columnar = &columnar return r } -// Cursor Cursor used to retrieve a set of paginated results. +// The cursor used to retrieve a set of paginated results. // If you specify a cursor, the API only uses the `columnar` and `time_zone` // request body parameters. // It ignores other request body parameters. // API name: cursor func (r *Query) Cursor(cursor string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Cursor = &cursor return r } -// FetchSize The maximum number of rows (or entries) to return in one response +// The maximum number of rows (or entries) to return in one response. 
// API name: fetch_size func (r *Query) FetchSize(fetchsize int) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FetchSize = &fetchsize return r } -// FieldMultiValueLeniency Throw an exception when encountering multiple values for a field (default) or -// be lenient and return the first value from the list (without any guarantees -// of what that will be - typically the first in natural ascending order). +// If `false`, the API returns an exception when encountering multiple values +// for a field. +// If `true`, the API is lenient and returns the first value from the array with +// no guarantee of consistent results. // API name: field_multi_value_leniency func (r *Query) FieldMultiValueLeniency(fieldmultivalueleniency bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FieldMultiValueLeniency = &fieldmultivalueleniency return r } -// Filter Elasticsearch query DSL for additional filtering. +// The Elasticsearch query DSL for additional filtering. // API name: filter -func (r *Query) Filter(filter *types.Query) *Query { +func (r *Query) Filter(filter types.QueryVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Filter = filter + r.req.Filter = filter.QueryCaster() return r } -// IndexUsingFrozen If true, the search can run on frozen indices. Defaults to false. +// If `true`, the search can run on frozen indices. // API name: index_using_frozen func (r *Query) IndexUsingFrozen(indexusingfrozen bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexUsingFrozen = &indexusingfrozen return r } -// KeepAlive Retention period for an async or saved synchronous search. +// The retention period for an async or saved synchronous search. 
// API name: keep_alive -func (r *Query) KeepAlive(duration types.Duration) *Query { - r.req.KeepAlive = duration +func (r *Query) KeepAlive(duration types.DurationVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.KeepAlive = *duration.DurationCaster() return r } -// KeepOnCompletion If true, Elasticsearch stores synchronous searches if you also specify the -// wait_for_completion_timeout parameter. If false, Elasticsearch only stores -// async searches that don’t finish before the wait_for_completion_timeout. +// If `true`, Elasticsearch stores synchronous searches if you also specify the +// `wait_for_completion_timeout` parameter. +// If `false`, Elasticsearch only stores async searches that don't finish before +// the `wait_for_completion_timeout`. // API name: keep_on_completion func (r *Query) KeepOnCompletion(keeponcompletion bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.KeepOnCompletion = &keeponcompletion return r } -// PageTimeout The timeout before a pagination request fails. +// The minimum retention period for the scroll cursor. +// After this time period, a pagination request might fail because the scroll +// cursor is no longer available. +// Subsequent scroll requests prolong the lifetime of the scroll cursor by the +// duration of `page_timeout` in the scroll request. // API name: page_timeout -func (r *Query) PageTimeout(duration types.Duration) *Query { - r.req.PageTimeout = duration +func (r *Query) PageTimeout(duration types.DurationVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PageTimeout = *duration.DurationCaster() return r } -// Params Values for parameters in the query. +// The values for parameters in the query. 
// API name: params func (r *Query) Params(params map[string]json.RawMessage) *Query { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Params = params + return r +} + +func (r *Query) AddParam(key string, value json.RawMessage) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Params == nil { + r.req.Params = make(map[string]json.RawMessage) + } else { + tmp = r.req.Params + } + tmp[key] = value + + r.req.Params = tmp return r } -// Query SQL query to run. +// The SQL query to run. // API name: query func (r *Query) Query(query string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Query = &query return r } -// RequestTimeout The timeout before the request fails. +// The timeout before the request fails. // API name: request_timeout -func (r *Query) RequestTimeout(duration types.Duration) *Query { - r.req.RequestTimeout = duration +func (r *Query) RequestTimeout(duration types.DurationVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RequestTimeout = *duration.DurationCaster() return r } -// RuntimeMappings Defines one or more runtime fields in the search request. These fields take -// precedence over mapped fields with the same name. +// One or more runtime fields for the search request. +// These fields take precedence over mapped fields with the same name. 
// API name: runtime_mappings -func (r *Query) RuntimeMappings(runtimefields types.RuntimeFields) *Query { - r.req.RuntimeMappings = runtimefields +func (r *Query) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() return r } -// TimeZone ISO-8601 time zone ID for the search. +// The ISO-8601 time zone ID for the search. // API name: time_zone func (r *Query) TimeZone(timezone string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TimeZone = &timezone return r } -// WaitForCompletionTimeout Period to wait for complete results. Defaults to no timeout, meaning the -// request waits for complete search results. If the search doesn’t finish -// within this period, the search becomes async. +// The period to wait for complete results. +// It defaults to no timeout, meaning the request waits for complete search +// results. +// If the search doesn't finish within this period, the search becomes async. +// +// To save a synchronous search, you must specify this parameter and the +// `keep_on_completion` parameter. // API name: wait_for_completion_timeout -func (r *Query) WaitForCompletionTimeout(duration types.Duration) *Query { - r.req.WaitForCompletionTimeout = duration +func (r *Query) WaitForCompletionTimeout(duration types.DurationVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.WaitForCompletionTimeout = *duration.DurationCaster() return r } diff --git a/typedapi/sql/query/request.go b/typedapi/sql/query/request.go index 68dcb9e0f3..dbc3f12942 100644 --- a/typedapi/sql/query/request.go +++ b/typedapi/sql/query/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package query @@ -33,52 +33,68 @@ import ( // Request holds the request body struct for the package query // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/sql/query/QuerySqlRequest.ts#L28-L122 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/sql/query/QuerySqlRequest.ts#L28-L152 type Request struct { - // Catalog Default catalog (cluster) for queries. If unspecified, the queries execute on - // the data in the local cluster only. + // AllowPartialSearchResults If `true`, the response has partial results when there are shard request + // timeouts or shard failures. + // If `false`, the API returns an error with no partial results. + AllowPartialSearchResults *bool `json:"allow_partial_search_results,omitempty"` + // Catalog The default catalog (cluster) for queries. + // If unspecified, the queries execute on the data in the local cluster only. Catalog *string `json:"catalog,omitempty"` - // Columnar If true, the results in a columnar fashion: one row represents all the values - // of a certain column from the current page of results. + // Columnar If `true`, the results are in a columnar fashion: one row represents all the + // values of a certain column from the current page of results. + // The API supports this parameter only for CBOR, JSON, SMILE, and YAML + // responses. Columnar *bool `json:"columnar,omitempty"` - // Cursor Cursor used to retrieve a set of paginated results. + // Cursor The cursor used to retrieve a set of paginated results. // If you specify a cursor, the API only uses the `columnar` and `time_zone` // request body parameters. 
// It ignores other request body parameters. Cursor *string `json:"cursor,omitempty"` - // FetchSize The maximum number of rows (or entries) to return in one response + // FetchSize The maximum number of rows (or entries) to return in one response. FetchSize *int `json:"fetch_size,omitempty"` - // FieldMultiValueLeniency Throw an exception when encountering multiple values for a field (default) or - // be lenient and return the first value from the list (without any guarantees - // of what that will be - typically the first in natural ascending order). + // FieldMultiValueLeniency If `false`, the API returns an exception when encountering multiple values + // for a field. + // If `true`, the API is lenient and returns the first value from the array with + // no guarantee of consistent results. FieldMultiValueLeniency *bool `json:"field_multi_value_leniency,omitempty"` - // Filter Elasticsearch query DSL for additional filtering. + // Filter The Elasticsearch query DSL for additional filtering. Filter *types.Query `json:"filter,omitempty"` - // IndexUsingFrozen If true, the search can run on frozen indices. Defaults to false. + // IndexUsingFrozen If `true`, the search can run on frozen indices. IndexUsingFrozen *bool `json:"index_using_frozen,omitempty"` - // KeepAlive Retention period for an async or saved synchronous search. + // KeepAlive The retention period for an async or saved synchronous search. KeepAlive types.Duration `json:"keep_alive,omitempty"` - // KeepOnCompletion If true, Elasticsearch stores synchronous searches if you also specify the - // wait_for_completion_timeout parameter. If false, Elasticsearch only stores - // async searches that don’t finish before the wait_for_completion_timeout. + // KeepOnCompletion If `true`, Elasticsearch stores synchronous searches if you also specify the + // `wait_for_completion_timeout` parameter. 
+ // If `false`, Elasticsearch only stores async searches that don't finish before + // the `wait_for_completion_timeout`. KeepOnCompletion *bool `json:"keep_on_completion,omitempty"` - // PageTimeout The timeout before a pagination request fails. + // PageTimeout The minimum retention period for the scroll cursor. + // After this time period, a pagination request might fail because the scroll + // cursor is no longer available. + // Subsequent scroll requests prolong the lifetime of the scroll cursor by the + // duration of `page_timeout` in the scroll request. PageTimeout types.Duration `json:"page_timeout,omitempty"` - // Params Values for parameters in the query. + // Params The values for parameters in the query. Params map[string]json.RawMessage `json:"params,omitempty"` - // Query SQL query to run. + // Query The SQL query to run. Query *string `json:"query,omitempty"` // RequestTimeout The timeout before the request fails. RequestTimeout types.Duration `json:"request_timeout,omitempty"` - // RuntimeMappings Defines one or more runtime fields in the search request. These fields take - // precedence over mapped fields with the same name. + // RuntimeMappings One or more runtime fields for the search request. + // These fields take precedence over mapped fields with the same name. RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` - // TimeZone ISO-8601 time zone ID for the search. + // TimeZone The ISO-8601 time zone ID for the search. TimeZone *string `json:"time_zone,omitempty"` - // WaitForCompletionTimeout Period to wait for complete results. Defaults to no timeout, meaning the - // request waits for complete search results. If the search doesn’t finish - // within this period, the search becomes async. + // WaitForCompletionTimeout The period to wait for complete results. + // It defaults to no timeout, meaning the request waits for complete search + // results. 
+ // If the search doesn't finish within this period, the search becomes async. + // + // To save a synchronous search, you must specify this parameter and the + // `keep_on_completion` parameter. WaitForCompletionTimeout types.Duration `json:"wait_for_completion_timeout,omitempty"` } @@ -117,6 +133,20 @@ func (s *Request) UnmarshalJSON(data []byte) error { switch t { + case "allow_partial_search_results": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowPartialSearchResults", err) + } + s.AllowPartialSearchResults = &value + case bool: + s.AllowPartialSearchResults = &v + } + case "catalog": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { diff --git a/typedapi/sql/query/response.go b/typedapi/sql/query/response.go index 7ef61d222b..ac4e8dae09 100644 --- a/typedapi/sql/query/response.go +++ b/typedapi/sql/query/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package query @@ -28,35 +28,36 @@ import ( // Response holds the response body struct for the package query // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/sql/query/QuerySqlResponse.ts#L23-L60 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/sql/query/QuerySqlResponse.ts#L23-L60 type Response struct { // Columns Column headings for the search results. Each object is a column. Columns []types.Column `json:"columns,omitempty"` - // Cursor Cursor for the next set of paginated results. 
For CSV, TSV, and - // TXT responses, this value is returned in the `Cursor` HTTP header. + // Cursor The cursor for the next set of paginated results. + // For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP + // header. Cursor *string `json:"cursor,omitempty"` - // Id Identifier for the search. This value is only returned for async and saved - // synchronous searches. For CSV, TSV, and TXT responses, this value is returned - // in the `Async-ID` HTTP header. + // Id The identifier for the search. + // This value is returned only for async and saved synchronous searches. + // For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` + // HTTP header. Id *string `json:"id,omitempty"` - // IsPartial If `true`, the response does not contain complete search results. If - // `is_partial` - // is `true` and `is_running` is `true`, the search is still running. If - // `is_partial` - // is `true` but `is_running` is `false`, the results are partial due to a - // failure or - // timeout. This value is only returned for async and saved synchronous - // searches. + // IsPartial If `true`, the response does not contain complete search results. + // If `is_partial` is `true` and `is_running` is `true`, the search is still + // running. + // If `is_partial` is `true` but `is_running` is `false`, the results are + // partial due to a failure or timeout. + // This value is returned only for async and saved synchronous searches. // For CSV, TSV, and TXT responses, this value is returned in the // `Async-partial` HTTP header. IsPartial *bool `json:"is_partial,omitempty"` - // IsRunning If `true`, the search is still running. If false, the search has finished. - // This value is only returned for async and saved synchronous searches. For - // CSV, TSV, and TXT responses, this value is returned in the `Async-partial` - // HTTP header. + // IsRunning If `true`, the search is still running. + // If `false`, the search has finished. 
+ // This value is returned only for async and saved synchronous searches. + // For CSV, TSV, and TXT responses, this value is returned in the + // `Async-partial` HTTP header. IsRunning *bool `json:"is_running,omitempty"` - // Rows Values for the search results. + // Rows The values for the search results. Rows [][]json.RawMessage `json:"rows"` } diff --git a/typedapi/sql/translate/request.go b/typedapi/sql/translate/request.go index 507adeb3db..e59c8b0553 100644 --- a/typedapi/sql/translate/request.go +++ b/typedapi/sql/translate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package translate @@ -33,16 +33,16 @@ import ( // Request holds the request body struct for the package translate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/sql/translate/TranslateSqlRequest.ts#L25-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/sql/translate/TranslateSqlRequest.ts#L25-L65 type Request struct { // FetchSize The maximum number of rows (or entries) to return in one response. FetchSize *int `json:"fetch_size,omitempty"` - // Filter Elasticsearch query DSL for additional filtering. + // Filter The Elasticsearch query DSL for additional filtering. Filter *types.Query `json:"filter,omitempty"` - // Query SQL query to run. + // Query The SQL query to run. Query string `json:"query"` - // TimeZone ISO-8601 time zone ID for the search. + // TimeZone The ISO-8601 time zone ID for the search. 
TimeZone *string `json:"time_zone,omitempty"` } diff --git a/typedapi/sql/translate/response.go b/typedapi/sql/translate/response.go index 96ed1d8a59..e8af2339a0 100644 --- a/typedapi/sql/translate/response.go +++ b/typedapi/sql/translate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package translate @@ -33,7 +33,7 @@ import ( // Response holds the response body struct for the package translate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/sql/translate/TranslateSqlResponse.ts#L28-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/sql/translate/TranslateSqlResponse.ts#L27-L37 type Response struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` Fields []types.FieldAndFormat `json:"fields,omitempty"` diff --git a/typedapi/sql/translate/translate.go b/typedapi/sql/translate/translate.go index 2fc1113a43..7826a0be52 100644 --- a/typedapi/sql/translate/translate.go +++ b/typedapi/sql/translate/translate.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Translates SQL into Elasticsearch queries +// Translate SQL into Elasticsearch queries. +// Translate an SQL search into a search API request containing Query DSL. +// It accepts the same request body parameters as the SQL search API, excluding +// `cursor`. 
package translate import ( @@ -73,7 +76,10 @@ func NewTranslateFunc(tp elastictransport.Interface) NewTranslate { } } -// Translates SQL into Elasticsearch queries +// Translate SQL into Elasticsearch queries. +// Translate an SQL search into a search API request containing Query DSL. +// It accepts the same request body parameters as the SQL search API, excluding +// `cursor`. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-translate-api.html func New(tp elastictransport.Interface) *Translate { @@ -83,8 +89,6 @@ func New(tp elastictransport.Interface) *Translate { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -336,35 +340,53 @@ func (r *Translate) Pretty(pretty bool) *Translate { return r } -// FetchSize The maximum number of rows (or entries) to return in one response. +// The maximum number of rows (or entries) to return in one response. // API name: fetch_size func (r *Translate) FetchSize(fetchsize int) *Translate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.FetchSize = &fetchsize return r } -// Filter Elasticsearch query DSL for additional filtering. +// The Elasticsearch query DSL for additional filtering. // API name: filter -func (r *Translate) Filter(filter *types.Query) *Translate { +func (r *Translate) Filter(filter types.QueryVariant) *Translate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Filter = filter + r.req.Filter = filter.QueryCaster() return r } -// Query SQL query to run. +// The SQL query to run. // API name: query func (r *Translate) Query(query string) *Translate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Query = query return r } -// TimeZone ISO-8601 time zone ID for the search. 
+// The ISO-8601 time zone ID for the search. // API name: time_zone func (r *Translate) TimeZone(timezone string) *Translate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TimeZone = &timezone return r diff --git a/typedapi/ssl/certificates/certificates.go b/typedapi/ssl/certificates/certificates.go index 5775582df4..a2c0ae440a 100644 --- a/typedapi/ssl/certificates/certificates.go +++ b/typedapi/ssl/certificates/certificates.go @@ -16,10 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves information about the X.509 certificates used to encrypt +// Get SSL certificates. +// +// Get information about the X.509 certificates that are used to encrypt // communications in the cluster. +// The API returns a list that includes certificates from all TLS contexts +// including: +// +// - Settings for transport and HTTP interfaces +// - TLS settings that are used within authentication realms +// - TLS settings for remote monitoring exporters +// +// The list includes certificates that are used for configuring trust, such as +// those configured in the `xpack.security.transport.ssl.truststore` and +// `xpack.security.transport.ssl.certificate_authorities` settings. +// It also includes certificates that are used for configuring server identity, +// such as `xpack.security.http.ssl.keystore` and +// `xpack.security.http.ssl.certificate settings`. +// +// The list does not include certificates that are sourced from the default SSL +// context of the Java Runtime Environment (JRE), even if those certificates are +// in use within Elasticsearch. 
+// +// NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the +// API returns all the certificates that are included in the PKCS#11 token +// irrespective of whether these are used in the Elasticsearch TLS +// configuration. +// +// If Elasticsearch is configured to use a keystore or truststore, the API +// output includes all certificates in that store, even though some of the +// certificates might not be in active use within the cluster. package certificates import ( @@ -69,8 +97,36 @@ func NewCertificatesFunc(tp elastictransport.Interface) NewCertificates { } } -// Retrieves information about the X.509 certificates used to encrypt +// Get SSL certificates. +// +// Get information about the X.509 certificates that are used to encrypt // communications in the cluster. +// The API returns a list that includes certificates from all TLS contexts +// including: +// +// - Settings for transport and HTTP interfaces +// - TLS settings that are used within authentication realms +// - TLS settings for remote monitoring exporters +// +// The list includes certificates that are used for configuring trust, such as +// those configured in the `xpack.security.transport.ssl.truststore` and +// `xpack.security.transport.ssl.certificate_authorities` settings. +// It also includes certificates that are used for configuring server identity, +// such as `xpack.security.http.ssl.keystore` and +// `xpack.security.http.ssl.certificate settings`. +// +// The list does not include certificates that are sourced from the default SSL +// context of the Java Runtime Environment (JRE), even if those certificates are +// in use within Elasticsearch. +// +// NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the +// API returns all the certificates that are included in the PKCS#11 token +// irrespective of whether these are used in the Elasticsearch TLS +// configuration. 
+// +// If Elasticsearch is configured to use a keystore or truststore, the API +// output includes all certificates in that store, even though some of the +// certificates might not be in active use within the cluster. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ssl.html func New(tp elastictransport.Interface) *Certificates { diff --git a/typedapi/ssl/certificates/response.go b/typedapi/ssl/certificates/response.go index a30e2cfef8..0359a95b68 100644 --- a/typedapi/ssl/certificates/response.go +++ b/typedapi/ssl/certificates/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package certificates @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package certificates // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ssl/certificates/GetCertificatesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ssl/certificates/GetCertificatesResponse.ts#L22-L24 type Response []types.CertificateInformation diff --git a/typedapi/synonyms/deletesynonym/delete_synonym.go b/typedapi/synonyms/deletesynonym/delete_synonym.go index e6715b72e1..6df77c5aa7 100644 --- a/typedapi/synonyms/deletesynonym/delete_synonym.go +++ b/typedapi/synonyms/deletesynonym/delete_synonym.go @@ -16,9 +16,34 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes a synonym set +// Delete a synonym set. +// +// You can only delete a synonyms set that is not in use by any index analyzer. +// +// Synonyms sets can be used in synonym graph token filters and synonym token +// filters. +// These synonym filters can be used as part of search analyzers. +// +// Analyzers need to be loaded when an index is restored (such as when a node +// starts, or the index becomes open). +// Even if the analyzer is not used on any field mapping, it still needs to be +// loaded on the index recovery phase. +// +// If any analyzers cannot be loaded, the index becomes unavailable and the +// cluster status becomes red or yellow as index shards are not available. +// To prevent that, synonyms sets that are used in analyzers can't be deleted. +// A delete request in this case will return a 400 response code. +// +// To remove a synonyms set, you must first remove all indices that contain +// analyzers using it. +// You can migrate an index by creating a new index that does not contain the +// token filter with the synonyms set, and use the reindex API in order to copy +// over the index data. +// Once finished, you can delete the index. +// When the synonyms set is not used in analyzers, you will be able to delete +// it. package deletesynonym import ( @@ -76,7 +101,32 @@ func NewDeleteSynonymFunc(tp elastictransport.Interface) NewDeleteSynonym { } } -// Deletes a synonym set +// Delete a synonym set. +// +// You can only delete a synonyms set that is not in use by any index analyzer. +// +// Synonyms sets can be used in synonym graph token filters and synonym token +// filters. +// These synonym filters can be used as part of search analyzers. 
+// +// Analyzers need to be loaded when an index is restored (such as when a node +// starts, or the index becomes open). +// Even if the analyzer is not used on any field mapping, it still needs to be +// loaded on the index recovery phase. +// +// If any analyzers cannot be loaded, the index becomes unavailable and the +// cluster status becomes red or yellow as index shards are not available. +// To prevent that, synonyms sets that are used in analyzers can't be deleted. +// A delete request in this case will return a 400 response code. +// +// To remove a synonyms set, you must first remove all indices that contain +// analyzers using it. +// You can migrate an index by creating a new index that does not contain the +// token filter with the synonyms set, and use the reindex API in order to copy +// over the index data. +// Once finished, you can delete the index. +// When the synonyms set is not used in analyzers, you will be able to delete +// it. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-synonyms-set.html func New(tp elastictransport.Interface) *DeleteSynonym { @@ -288,7 +338,7 @@ func (r *DeleteSynonym) Header(key, value string) *DeleteSynonym { return r } -// Id The id of the synonyms set to be deleted +// Id The synonyms set identifier to delete. // API Name: id func (r *DeleteSynonym) _id(id string) *DeleteSynonym { r.paramSet |= idMask diff --git a/typedapi/synonyms/deletesynonym/response.go b/typedapi/synonyms/deletesynonym/response.go index 169864c714..f956536fbc 100644 --- a/typedapi/synonyms/deletesynonym/response.go +++ b/typedapi/synonyms/deletesynonym/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletesynonym // Response holds the response body struct for the package deletesynonym // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/synonyms/delete_synonym/SynonymsDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/synonyms/delete_synonym/SynonymsDeleteResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go b/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go index a8a7e8e0b9..c0e51fef2a 100644 --- a/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go +++ b/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deletes a synonym rule in a synonym set +// Delete a synonym rule. +// Delete a synonym rule from a synonym set. package deletesynonymrule import ( @@ -81,7 +82,8 @@ func NewDeleteSynonymRuleFunc(tp elastictransport.Interface) NewDeleteSynonymRul } } -// Deletes a synonym rule in a synonym set +// Delete a synonym rule. +// Delete a synonym rule from a synonym set. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-synonym-rule.html func New(tp elastictransport.Interface) *DeleteSynonymRule { @@ -305,7 +307,7 @@ func (r *DeleteSynonymRule) Header(key, value string) *DeleteSynonymRule { return r } -// SetId The id of the synonym set to be updated +// SetId The ID of the synonym set to update. // API Name: setid func (r *DeleteSynonymRule) _setid(setid string) *DeleteSynonymRule { r.paramSet |= setidMask @@ -314,7 +316,7 @@ func (r *DeleteSynonymRule) _setid(setid string) *DeleteSynonymRule { return r } -// RuleId The id of the synonym rule to be deleted +// RuleId The ID of the synonym rule to delete. // API Name: ruleid func (r *DeleteSynonymRule) _ruleid(ruleid string) *DeleteSynonymRule { r.paramSet |= ruleidMask diff --git a/typedapi/synonyms/deletesynonymrule/response.go b/typedapi/synonyms/deletesynonymrule/response.go index c5bce26baa..b50db809d5 100644 --- a/typedapi/synonyms/deletesynonymrule/response.go +++ b/typedapi/synonyms/deletesynonymrule/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletesynonymrule @@ -27,13 +27,13 @@ import ( // Response holds the response body struct for the package deletesynonymrule // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/synonyms/delete_synonym_rule/SynonymRuleDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/synonyms/delete_synonym_rule/SynonymRuleDeleteResponse.ts#L22-L24 type Response struct { // ReloadAnalyzersDetails Updating synonyms in a synonym set reloads the associated analyzers. 
- // This is the analyzers reloading result + // This information is the analyzers reloading result. ReloadAnalyzersDetails types.ReloadResult `json:"reload_analyzers_details"` - // Result Update operation result + // Result The update operation result. Result result.Result `json:"result"` } diff --git a/typedapi/synonyms/getsynonym/get_synonym.go b/typedapi/synonyms/getsynonym/get_synonym.go index 0a2de094ee..1ec8851a9c 100644 --- a/typedapi/synonyms/getsynonym/get_synonym.go +++ b/typedapi/synonyms/getsynonym/get_synonym.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves a synonym set +// Get a synonym set. package getsynonym import ( @@ -76,7 +76,7 @@ func NewGetSynonymFunc(tp elastictransport.Interface) NewGetSynonym { } } -// Retrieves a synonym set +// Get a synonym set. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-synonyms-set.html func New(tp elastictransport.Interface) *GetSynonym { @@ -288,7 +288,7 @@ func (r *GetSynonym) Header(key, value string) *GetSynonym { return r } -// Id "The id of the synonyms set to be retrieved +// Id The synonyms set identifier to retrieve. // API Name: id func (r *GetSynonym) _id(id string) *GetSynonym { r.paramSet |= idMask @@ -297,7 +297,7 @@ func (r *GetSynonym) _id(id string) *GetSynonym { return r } -// From Starting offset for query rules to be retrieved +// From The starting offset for query rules to retrieve. // API name: from func (r *GetSynonym) From(from int) *GetSynonym { r.values.Set("from", strconv.Itoa(from)) @@ -305,7 +305,7 @@ func (r *GetSynonym) From(from int) *GetSynonym { return r } -// Size specifies a max number of query rules to retrieve +// Size The max number of query rules to retrieve. 
// API name: size func (r *GetSynonym) Size(size int) *GetSynonym { r.values.Set("size", strconv.Itoa(size)) diff --git a/typedapi/synonyms/getsynonym/response.go b/typedapi/synonyms/getsynonym/response.go index 8365e2869b..0038ef95d9 100644 --- a/typedapi/synonyms/getsynonym/response.go +++ b/typedapi/synonyms/getsynonym/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getsynonym @@ -26,9 +26,12 @@ import ( // Response holds the response body struct for the package getsynonym // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/synonyms/get_synonym/SynonymsGetResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/synonyms/get_synonym/SynonymsGetResponse.ts#L23-L34 type Response struct { - Count int `json:"count"` + + // Count The total number of synonyms rules that the synonyms set contains. + Count int `json:"count"` + // SynonymsSet Synonym rule details. SynonymsSet []types.SynonymRuleRead `json:"synonyms_set"` } diff --git a/typedapi/synonyms/getsynonymrule/get_synonym_rule.go b/typedapi/synonyms/getsynonymrule/get_synonym_rule.go index b66d0a7488..cb822fdcfc 100644 --- a/typedapi/synonyms/getsynonymrule/get_synonym_rule.go +++ b/typedapi/synonyms/getsynonymrule/get_synonym_rule.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves a synonym rule from a synonym set +// Get a synonym rule. +// Get a synonym rule from a synonym set. package getsynonymrule import ( @@ -81,7 +82,8 @@ func NewGetSynonymRuleFunc(tp elastictransport.Interface) NewGetSynonymRule { } } -// Retrieves a synonym rule from a synonym set +// Get a synonym rule. +// Get a synonym rule from a synonym set. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-synonym-rule.html func New(tp elastictransport.Interface) *GetSynonymRule { @@ -305,7 +307,7 @@ func (r *GetSynonymRule) Header(key, value string) *GetSynonymRule { return r } -// SetId The id of the synonym set to retrieve the synonym rule from +// SetId The ID of the synonym set to retrieve the synonym rule from. // API Name: setid func (r *GetSynonymRule) _setid(setid string) *GetSynonymRule { r.paramSet |= setidMask @@ -314,7 +316,7 @@ func (r *GetSynonymRule) _setid(setid string) *GetSynonymRule { return r } -// RuleId The id of the synonym rule to retrieve +// RuleId The ID of the synonym rule to retrieve. // API Name: ruleid func (r *GetSynonymRule) _ruleid(ruleid string) *GetSynonymRule { r.paramSet |= ruleidMask diff --git a/typedapi/synonyms/getsynonymrule/response.go b/typedapi/synonyms/getsynonymrule/response.go index b938a31daa..b5224a3831 100644 --- a/typedapi/synonyms/getsynonymrule/response.go +++ b/typedapi/synonyms/getsynonymrule/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getsynonymrule // Response holds the response body struct for the package getsynonymrule // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/synonyms/get_synonym_rule/SynonymRuleGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/synonyms/get_synonym_rule/SynonymRuleGetResponse.ts#L22-L24 type Response struct { // Id Synonym Rule identifier diff --git a/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go b/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go index 4addaa6b55..5a8c1cadee 100644 --- a/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go +++ b/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves a summary of all defined synonym sets +// Get all synonym sets. +// Get a summary of all defined synonym sets. package getsynonymssets import ( @@ -68,9 +69,10 @@ func NewGetSynonymsSetsFunc(tp elastictransport.Interface) NewGetSynonymsSets { } } -// Retrieves a summary of all defined synonym sets +// Get all synonym sets. +// Get a summary of all defined synonym sets. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/list-synonyms-sets.html +// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-synonyms-set.html func New(tp elastictransport.Interface) *GetSynonymsSets { r := &GetSynonymsSets{ transport: tp, @@ -274,7 +276,7 @@ func (r *GetSynonymsSets) Header(key, value string) *GetSynonymsSets { return r } -// From Starting offset +// From The starting offset for synonyms sets to retrieve. // API name: from func (r *GetSynonymsSets) From(from int) *GetSynonymsSets { r.values.Set("from", strconv.Itoa(from)) @@ -282,7 +284,7 @@ func (r *GetSynonymsSets) From(from int) *GetSynonymsSets { return r } -// Size specifies a max number of results to get +// Size The maximum number of synonyms sets to retrieve. // API name: size func (r *GetSynonymsSets) Size(size int) *GetSynonymsSets { r.values.Set("size", strconv.Itoa(size)) diff --git a/typedapi/synonyms/getsynonymssets/response.go b/typedapi/synonyms/getsynonymssets/response.go index 96af6199f9..27ea09f697 100644 --- a/typedapi/synonyms/getsynonymssets/response.go +++ b/typedapi/synonyms/getsynonymssets/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getsynonymssets @@ -26,9 +26,13 @@ import ( // Response holds the response body struct for the package getsynonymssets // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/synonyms/get_synonyms_sets/SynonymsSetsGetResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/synonyms/get_synonyms_sets/SynonymsSetsGetResponse.ts#L23-L34 type Response struct { - Count int `json:"count"` + + // Count The total number of synonyms sets defined. + Count int `json:"count"` + // Results The identifier and total number of defined synonym rules for each synonyms + // set. Results []types.SynonymsSetItem `json:"results"` } diff --git a/typedapi/synonyms/putsynonym/put_synonym.go b/typedapi/synonyms/putsynonym/put_synonym.go index ec5f849ba5..2634cb9345 100644 --- a/typedapi/synonyms/putsynonym/put_synonym.go +++ b/typedapi/synonyms/putsynonym/put_synonym.go @@ -16,9 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates or updates a synonym set. +// Create or update a synonym set. +// Synonyms sets are limited to a maximum of 10,000 synonym rules per set. +// If you need to manage more synonym rules, you can create multiple synonym +// sets. +// +// When an existing synonyms set is updated, the search analyzers that use the +// synonyms set are reloaded automatically for all indices. 
+// This is equivalent to invoking the reload search analyzers API for all +// indices that use the synonyms set. package putsynonym import ( @@ -81,7 +89,15 @@ func NewPutSynonymFunc(tp elastictransport.Interface) NewPutSynonym { } } -// Creates or updates a synonym set. +// Create or update a synonym set. +// Synonyms sets are limited to a maximum of 10,000 synonym rules per set. +// If you need to manage more synonym rules, you can create multiple synonym +// sets. +// +// When an existing synonyms set is updated, the search analyzers that use the +// synonyms set are reloaded automatically for all indices. +// This is equivalent to invoking the reload search analyzers API for all +// indices that use the synonyms set. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-synonyms-set.html func New(tp elastictransport.Interface) *PutSynonym { @@ -91,8 +107,6 @@ func New(tp elastictransport.Interface) *PutSynonym { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -304,7 +318,7 @@ func (r *PutSynonym) Header(key, value string) *PutSynonym { return r } -// Id The id of the synonyms set to be created or updated +// Id The ID of the synonyms set to be created or updated. // API Name: id func (r *PutSynonym) _id(id string) *PutSynonym { r.paramSet |= idMask @@ -357,10 +371,17 @@ func (r *PutSynonym) Pretty(pretty bool) *PutSynonym { return r } -// SynonymsSet The synonym set information to update +// The synonym rules definitions for the synonyms set. 
// API name: synonyms_set -func (r *PutSynonym) SynonymsSet(synonymssets ...types.SynonymRule) *PutSynonym { - r.req.SynonymsSet = synonymssets +func (r *PutSynonym) SynonymsSet(synonymssets ...types.SynonymRuleVariant) *PutSynonym { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.SynonymsSet = make([]types.SynonymRule, len(synonymssets)) + for i, v := range synonymssets { + r.req.SynonymsSet[i] = *v.SynonymRuleCaster() + } return r } diff --git a/typedapi/synonyms/putsynonym/request.go b/typedapi/synonyms/putsynonym/request.go index c05cd38915..d0c0613f2d 100644 --- a/typedapi/synonyms/putsynonym/request.go +++ b/typedapi/synonyms/putsynonym/request.go @@ -16,23 +16,26 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putsynonym import ( + "bytes" "encoding/json" + "errors" "fmt" + "io" "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Request holds the request body struct for the package putsynonym // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/synonyms/put_synonym/SynonymsPutRequest.ts#L23-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/synonyms/put_synonym/SynonymsPutRequest.ts#L23-L55 type Request struct { - // SynonymsSet The synonym set information to update + // SynonymsSet The synonym rules definitions for the synonyms set. 
SynonymsSet []types.SynonymRule `json:"synonyms_set"` } @@ -54,3 +57,38 @@ func (r *Request) FromJSON(data string) (*Request, error) { return &req, nil } + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "synonyms_set": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := types.NewSynonymRule() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "SynonymsSet", err) + } + + s.SynonymsSet = append(s.SynonymsSet, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.SynonymsSet); err != nil { + return fmt.Errorf("%s | %w", "SynonymsSet", err) + } + } + + } + } + return nil +} diff --git a/typedapi/synonyms/putsynonym/response.go b/typedapi/synonyms/putsynonym/response.go index d5b8bb93ac..593aa17d8d 100644 --- a/typedapi/synonyms/putsynonym/response.go +++ b/typedapi/synonyms/putsynonym/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putsynonym @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package putsynonym // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/synonyms/put_synonym/SynonymsPutResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/synonyms/put_synonym/SynonymsPutResponse.ts#L23-L28 type Response struct { ReloadAnalyzersDetails types.ReloadResult `json:"reload_analyzers_details"` Result result.Result `json:"result"` diff --git a/typedapi/synonyms/putsynonymrule/put_synonym_rule.go b/typedapi/synonyms/putsynonymrule/put_synonym_rule.go index 2ff799face..dec5a1c038 100644 --- a/typedapi/synonyms/putsynonymrule/put_synonym_rule.go +++ b/typedapi/synonyms/putsynonymrule/put_synonym_rule.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates or updates a synonym rule in a synonym set +// Create or update a synonym rule. +// Create or update a synonym rule in a synonym set. +// +// If any of the synonym rules included is invalid, the API returns an error. +// +// When you update a synonym rule, all analyzers using the synonyms set will be +// reloaded automatically to reflect the new rule. package putsynonymrule import ( @@ -86,7 +92,13 @@ func NewPutSynonymRuleFunc(tp elastictransport.Interface) NewPutSynonymRule { } } -// Creates or updates a synonym rule in a synonym set +// Create or update a synonym rule. 
+// Create or update a synonym rule in a synonym set. +// +// If any of the synonym rules included is invalid, the API returns an error. +// +// When you update a synonym rule, all analyzers using the synonyms set will be +// reloaded automatically to reflect the new rule. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-synonym-rule.html func New(tp elastictransport.Interface) *PutSynonymRule { @@ -96,8 +108,6 @@ func New(tp elastictransport.Interface) *PutSynonymRule { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -315,7 +325,7 @@ func (r *PutSynonymRule) Header(key, value string) *PutSynonymRule { return r } -// SetId The id of the synonym set to be updated with the synonym rule +// SetId The ID of the synonym set. // API Name: setid func (r *PutSynonymRule) _setid(setid string) *PutSynonymRule { r.paramSet |= setidMask @@ -324,7 +334,7 @@ func (r *PutSynonymRule) _setid(setid string) *PutSynonymRule { return r } -// RuleId The id of the synonym rule to be updated or created +// RuleId The ID of the synonym rule to be updated or created. // API Name: ruleid func (r *PutSynonymRule) _ruleid(ruleid string) *PutSynonymRule { r.paramSet |= ruleidMask @@ -377,8 +387,14 @@ func (r *PutSynonymRule) Pretty(pretty bool) *PutSynonymRule { return r } +// The synonym rule information definition, which must be in Solr format. // API name: synonyms func (r *PutSynonymRule) Synonyms(synonymstring string) *PutSynonymRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Synonyms = synonymstring return r diff --git a/typedapi/synonyms/putsynonymrule/request.go b/typedapi/synonyms/putsynonymrule/request.go index 0b4e7a4fe5..f48d12f20c 100644 --- a/typedapi/synonyms/putsynonymrule/request.go +++ b/typedapi/synonyms/putsynonymrule/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putsynonymrule @@ -30,8 +30,10 @@ import ( // Request holds the request body struct for the package putsynonymrule // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/synonyms/put_synonym_rule/SynonymRulePutRequest.ts#L23-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/synonyms/put_synonym_rule/SynonymRulePutRequest.ts#L23-L60 type Request struct { + + // Synonyms The synonym rule information definition, which must be in Solr format. Synonyms string `json:"synonyms"` } diff --git a/typedapi/synonyms/putsynonymrule/response.go b/typedapi/synonyms/putsynonymrule/response.go index 6987eef505..d762b6ad79 100644 --- a/typedapi/synonyms/putsynonymrule/response.go +++ b/typedapi/synonyms/putsynonymrule/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putsynonymrule @@ -27,13 +27,13 @@ import ( // Response holds the response body struct for the package putsynonymrule // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/synonyms/put_synonym_rule/SynonymRulePutResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/synonyms/put_synonym_rule/SynonymRulePutResponse.ts#L22-L24 type Response struct { // ReloadAnalyzersDetails Updating synonyms in a synonym set reloads the associated analyzers. - // This is the analyzers reloading result + // This information is the analyzers reloading result. ReloadAnalyzersDetails types.ReloadResult `json:"reload_analyzers_details"` - // Result Update operation result + // Result The update operation result. Result result.Result `json:"result"` } diff --git a/typedapi/tasks/cancel/cancel.go b/typedapi/tasks/cancel/cancel.go index 04534547fb..21fb631871 100644 --- a/typedapi/tasks/cancel/cancel.go +++ b/typedapi/tasks/cancel/cancel.go @@ -16,9 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Cancels a task, if it can be cancelled through an API. +// Cancel a task. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. 
+// +// A task may continue to run for some time after it has been cancelled because +// it may not be able to safely stop its current activity straight away. +// It is also possible that Elasticsearch must complete its work on other tasks +// before it can process the cancellation. +// The get task information API will continue to list these cancelled tasks +// until they complete. +// The cancelled flag in the response indicates that the cancellation command +// has been processed and the task will stop as soon as possible. +// +// To troubleshoot why a cancelled task does not complete promptly, use the get +// task information API with the `?detailed` parameter to identify the other +// tasks the system is running. +// You can also use the node hot threads API to obtain detailed information +// about the work the system is doing instead of completing the cancelled task. package cancel import ( @@ -74,7 +93,26 @@ func NewCancelFunc(tp elastictransport.Interface) NewCancel { } } -// Cancels a task, if it can be cancelled through an API. +// Cancel a task. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. +// +// A task may continue to run for some time after it has been cancelled because +// it may not be able to safely stop its current activity straight away. +// It is also possible that Elasticsearch must complete its work on other tasks +// before it can process the cancellation. +// The get task information API will continue to list these cancelled tasks +// until they complete. +// The cancelled flag in the response indicates that the cancellation command +// has been processed and the task will stop as soon as possible. +// +// To troubleshoot why a cancelled task does not complete promptly, use the get +// task information API with the `?detailed` parameter to identify the other +// tasks the system is running. 
+// You can also use the node hot threads API to obtain detailed information +// about the work the system is doing instead of completing the cancelled task. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html func New(tp elastictransport.Interface) *Cancel { @@ -295,7 +333,7 @@ func (r *Cancel) Header(key, value string) *Cancel { return r } -// TaskId ID of the task. +// TaskId The task identifier. // API Name: taskid func (r *Cancel) TaskId(taskid string) *Cancel { r.paramSet |= taskidMask @@ -304,8 +342,8 @@ func (r *Cancel) TaskId(taskid string) *Cancel { return r } -// Actions Comma-separated list or wildcard expression of actions used to limit the -// request. +// Actions A comma-separated list or wildcard expression of actions that is used to +// limit the request. // API name: actions func (r *Cancel) Actions(actions ...string) *Cancel { tmp := []string{} @@ -317,7 +355,8 @@ func (r *Cancel) Actions(actions ...string) *Cancel { return r } -// Nodes Comma-separated list of node IDs or names used to limit the request. +// Nodes A comma-separated list of node IDs or names that is used to limit the +// request. // API name: nodes func (r *Cancel) Nodes(nodes ...string) *Cancel { tmp := []string{} @@ -329,7 +368,7 @@ func (r *Cancel) Nodes(nodes ...string) *Cancel { return r } -// ParentTaskId Parent task ID used to limit the tasks. +// ParentTaskId A parent task ID that is used to limit the tasks. // API name: parent_task_id func (r *Cancel) ParentTaskId(parenttaskid string) *Cancel { r.values.Set("parent_task_id", parenttaskid) @@ -337,8 +376,7 @@ func (r *Cancel) ParentTaskId(parenttaskid string) *Cancel { return r } -// WaitForCompletion Should the request block until the cancellation of the task and its -// descendant tasks is completed. Defaults to false +// WaitForCompletion If true, the request blocks until all found tasks are complete. 
// API name: wait_for_completion func (r *Cancel) WaitForCompletion(waitforcompletion bool) *Cancel { r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) diff --git a/typedapi/tasks/cancel/response.go b/typedapi/tasks/cancel/response.go index 2181f5b269..249ec24191 100644 --- a/typedapi/tasks/cancel/response.go +++ b/typedapi/tasks/cancel/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package cancel @@ -32,7 +32,7 @@ import ( // Response holds the response body struct for the package cancel // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/tasks/cancel/CancelTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/tasks/cancel/CancelTasksResponse.ts#L22-L24 type Response struct { NodeFailures []types.ErrorCause `json:"node_failures,omitempty"` // Nodes Task information grouped by node, if `group_by` was set to `node` (the diff --git a/typedapi/tasks/get/get.go b/typedapi/tasks/get/get.go index 4929dd46d1..29e8fde40b 100644 --- a/typedapi/tasks/get/get.go +++ b/typedapi/tasks/get/get.go @@ -16,10 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get task information. -// Returns information about the tasks currently executing in the cluster. +// Get information about a task currently running in the cluster. 
+// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. +// +// If the task identifier is not found, a 404 response code indicates that there +// are no resources that match the request. package get import ( @@ -78,7 +85,14 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { } // Get task information. -// Returns information about the tasks currently executing in the cluster. +// Get information about a task currently running in the cluster. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. +// +// If the task identifier is not found, a 404 response code indicates that there +// are no resources that match the request. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html func New(tp elastictransport.Interface) *Get { @@ -290,7 +304,7 @@ func (r *Get) Header(key, value string) *Get { return r } -// TaskId ID of the task. +// TaskId The task identifier. // API Name: taskid func (r *Get) _taskid(taskid string) *Get { r.paramSet |= taskidMask @@ -299,7 +313,7 @@ func (r *Get) _taskid(taskid string) *Get { return r } -// Timeout Period to wait for a response. +// Timeout The period to wait for a response. // If no response is received before the timeout expires, the request fails and // returns an error. // API name: timeout diff --git a/typedapi/tasks/get/response.go b/typedapi/tasks/get/response.go index 423444ac63..717670049c 100644 --- a/typedapi/tasks/get/response.go +++ b/typedapi/tasks/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package get @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/tasks/get/GetTaskResponse.ts#L24-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/tasks/get/GetTaskResponse.ts#L24-L31 type Response struct { Completed bool `json:"completed"` Error *types.ErrorCause `json:"error,omitempty"` diff --git a/typedapi/tasks/list/list.go b/typedapi/tasks/list/list.go index b2c8f00a7a..d385c9ed26 100644 --- a/typedapi/tasks/list/list.go +++ b/typedapi/tasks/list/list.go @@ -16,10 +16,77 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// The task management API returns information about tasks currently executing -// on one or more nodes in the cluster. +// Get all tasks. +// Get information about the tasks currently running on one or more nodes in the +// cluster. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. +// +// **Identifying running tasks** +// +// The `X-Opaque-Id header`, when provided on the HTTP request header, is going +// to be returned as a header in the response as well as in the headers field +// for in the task information. +// This enables you to track certain calls or associate certain tasks with the +// client that started them. 
+// For example: +// +// ``` +// curl -i -H "X-Opaque-Id: 123456" +// "http://localhost:9200/_tasks?group_by=parents" +// ``` +// +// The API returns the following result: +// +// ``` +// HTTP/1.1 200 OK +// X-Opaque-Id: 123456 +// content-type: application/json; charset=UTF-8 +// content-length: 831 +// +// { +// "tasks" : { +// "u5lcZHqcQhu-rUoFaqDphA:45" : { +// "node" : "u5lcZHqcQhu-rUoFaqDphA", +// "id" : 45, +// "type" : "transport", +// "action" : "cluster:monitor/tasks/lists", +// "start_time_in_millis" : 1513823752749, +// "running_time_in_nanos" : 293139, +// "cancellable" : false, +// "headers" : { +// "X-Opaque-Id" : "123456" +// }, +// "children" : [ +// { +// "node" : "u5lcZHqcQhu-rUoFaqDphA", +// "id" : 46, +// "type" : "direct", +// "action" : "cluster:monitor/tasks/lists[n]", +// "start_time_in_millis" : 1513823752750, +// "running_time_in_nanos" : 92133, +// "cancellable" : false, +// "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", +// "headers" : { +// "X-Opaque-Id" : "123456" +// } +// } +// ] +// } +// } +// } +// +// ``` +// In this example, `X-Opaque-Id: 123456` is the ID as a part of the response +// header. +// The `X-Opaque-Id` in the task `headers` is the ID for the task that was +// initiated by the REST request. +// The `X-Opaque-Id` in the children `headers` is the child task of the task +// that was initiated by the REST request. package list import ( @@ -70,8 +137,75 @@ func NewListFunc(tp elastictransport.Interface) NewList { } } -// The task management API returns information about tasks currently executing -// on one or more nodes in the cluster. +// Get all tasks. +// Get information about the tasks currently running on one or more nodes in the +// cluster. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. 
+// +// **Identifying running tasks** +// +// The `X-Opaque-Id header`, when provided on the HTTP request header, is going +// to be returned as a header in the response as well as in the headers field +// for in the task information. +// This enables you to track certain calls or associate certain tasks with the +// client that started them. +// For example: +// +// ``` +// curl -i -H "X-Opaque-Id: 123456" +// "http://localhost:9200/_tasks?group_by=parents" +// ``` +// +// The API returns the following result: +// +// ``` +// HTTP/1.1 200 OK +// X-Opaque-Id: 123456 +// content-type: application/json; charset=UTF-8 +// content-length: 831 +// +// { +// "tasks" : { +// "u5lcZHqcQhu-rUoFaqDphA:45" : { +// "node" : "u5lcZHqcQhu-rUoFaqDphA", +// "id" : 45, +// "type" : "transport", +// "action" : "cluster:monitor/tasks/lists", +// "start_time_in_millis" : 1513823752749, +// "running_time_in_nanos" : 293139, +// "cancellable" : false, +// "headers" : { +// "X-Opaque-Id" : "123456" +// }, +// "children" : [ +// { +// "node" : "u5lcZHqcQhu-rUoFaqDphA", +// "id" : 46, +// "type" : "direct", +// "action" : "cluster:monitor/tasks/lists[n]", +// "start_time_in_millis" : 1513823752750, +// "running_time_in_nanos" : 92133, +// "cancellable" : false, +// "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", +// "headers" : { +// "X-Opaque-Id" : "123456" +// } +// } +// ] +// } +// } +// } +// +// ``` +// In this example, `X-Opaque-Id: 123456` is the ID as a part of the response +// header. +// The `X-Opaque-Id` in the task `headers` is the ID for the task that was +// initiated by the REST request. +// The `X-Opaque-Id` in the children `headers` is the child task of the task +// that was initiated by the REST request. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html func New(tp elastictransport.Interface) *List { @@ -277,8 +411,9 @@ func (r *List) Header(key, value string) *List { return r } -// Actions Comma-separated list or wildcard expression of actions used to limit the +// Actions A comma-separated list or wildcard expression of actions used to limit the // request. +// For example, you can use `cluster:*` to retrieve all cluster-related tasks. // API name: actions func (r *List) Actions(actions ...string) *List { tmp := []string{} @@ -290,7 +425,10 @@ func (r *List) Actions(actions ...string) *List { return r } -// Detailed If `true`, the response includes detailed information about shard recoveries. +// Detailed If `true`, the response includes detailed information about the running +// tasks. +// This information is useful to distinguish tasks from each other but is more +// costly to run. // API name: detailed func (r *List) Detailed(detailed bool) *List { r.values.Set("detailed", strconv.FormatBool(detailed)) @@ -298,7 +436,8 @@ func (r *List) Detailed(detailed bool) *List { return r } -// GroupBy Key used to group tasks in the response. +// GroupBy A key that is used to group tasks in the response. +// The task lists can be grouped either by nodes or by parent tasks. // API name: group_by func (r *List) GroupBy(groupby groupby.GroupBy) *List { r.values.Set("group_by", groupby.String()) @@ -306,20 +445,18 @@ func (r *List) GroupBy(groupby groupby.GroupBy) *List { return r } -// NodeId Comma-separated list of node IDs or names used to limit returned information. -// API name: node_id -func (r *List) NodeId(nodeids ...string) *List { - tmp := []string{} - for _, item := range nodeids { - tmp = append(tmp, fmt.Sprintf("%v", item)) - } - r.values.Set("node_id", strings.Join(tmp, ",")) +// Nodes A comma-separated list of node IDs or names that is used to limit the +// returned information. 
+// API name: nodes +func (r *List) Nodes(nodeids ...string) *List { + r.values.Set("nodes", strings.Join(nodeids, ",")) return r } -// ParentTaskId Parent task ID used to limit returned information. To return all tasks, omit -// this parameter or use a value of `-1`. +// ParentTaskId A parent task identifier that is used to limit returned information. +// To return all tasks, omit this parameter or use a value of `-1`. +// If the parent task is not found, the API does not return a 404 response code. // API name: parent_task_id func (r *List) ParentTaskId(id string) *List { r.values.Set("parent_task_id", id) @@ -327,17 +464,10 @@ func (r *List) ParentTaskId(id string) *List { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. -// API name: master_timeout -func (r *List) MasterTimeout(duration string) *List { - r.values.Set("master_timeout", duration) - - return r -} - -// Timeout Period to wait for a response. If no response is received before the timeout -// expires, the request fails and returns an error. +// Timeout The period to wait for each node to respond. +// If a node does not respond before its timeout expires, the response does not +// include its information. +// However, timed out nodes are included in the `node_failures` property. // API name: timeout func (r *List) Timeout(duration string) *List { r.values.Set("timeout", duration) diff --git a/typedapi/tasks/list/response.go b/typedapi/tasks/list/response.go index 416d48cbd2..9075602cb5 100644 --- a/typedapi/tasks/list/response.go +++ b/typedapi/tasks/list/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package list @@ -32,7 +32,7 @@ import ( // Response holds the response body struct for the package list // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/tasks/list/ListTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/tasks/list/ListTasksResponse.ts#L22-L24 type Response struct { NodeFailures []types.ErrorCause `json:"node_failures,omitempty"` // Nodes Task information grouped by node, if `group_by` was set to `node` (the diff --git a/typedapi/textstructure/findfieldstructure/find_field_structure.go b/typedapi/textstructure/findfieldstructure/find_field_structure.go index b377eed298..a09d7dc2e9 100644 --- a/typedapi/textstructure/findfieldstructure/find_field_structure.go +++ b/typedapi/textstructure/findfieldstructure/find_field_structure.go @@ -16,21 +16,52 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Finds the structure of a text field in an index. +// Find the structure of a text field. +// Find the structure of a text field in an Elasticsearch index. +// +// This API provides a starting point for extracting further information from +// log messages already ingested into Elasticsearch. +// For example, if you have ingested data into a very simple index that has just +// `@timestamp` and message fields, you can use this API to see what common +// structure exists in the message field. 
+// +// The response from the API contains: +// +// * Sample messages. +// * Statistics that reveal the most common values for all fields detected +// within the text and basic numeric statistics for numeric fields. +// * Information about the structure of the text, which is useful when you write +// ingest configurations to index it or similarly formatted text. +// * Appropriate mappings for an Elasticsearch index, which you could use to +// ingest the text. +// +// All this information can be calculated by the structure finder with no +// guidance. +// However, you can optionally override some of the decisions about the text +// structure by specifying one or more query parameters. +// +// If the structure finder produces unexpected results, specify the `explain` +// query parameter and an explanation will appear in the response. +// It helps determine why the returned structure was chosen. package findfieldstructure import ( "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ecscompatibilitytype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/formattype" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -65,7 +96,33 @@ func NewFindFieldStructureFunc(tp elastictransport.Interface) NewFindFieldStruct } } -// Finds the structure of a text field in an index. +// Find the structure of a text field. +// Find the structure of a text field in an Elasticsearch index. +// +// This API provides a starting point for extracting further information from +// log messages already ingested into Elasticsearch. 
+// For example, if you have ingested data into a very simple index that has just +// `@timestamp` and message fields, you can use this API to see what common +// structure exists in the message field. +// +// The response from the API contains: +// +// * Sample messages. +// * Statistics that reveal the most common values for all fields detected +// within the text and basic numeric statistics for numeric fields. +// * Information about the structure of the text, which is useful when you write +// ingest configurations to index it or similarly formatted text. +// * Appropriate mappings for an Elasticsearch index, which you could use to +// ingest the text. +// +// All this information can be calculated by the structure finder with no +// guidance. +// However, you can optionally override some of the decisions about the text +// structure by specifying one or more query parameters. +// +// If the structure finder produces unexpected results, specify the `explain` +// query parameter and an explanation will appear in the response. +// It helps determine why the returned structure was chosen. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/find-field-structure.html func New(tp elastictransport.Interface) *FindFieldStructure { @@ -174,8 +231,57 @@ func (r FindFieldStructure) Perform(providedCtx context.Context) (*http.Response } // Do runs the request through the transport, handle the response and returns a findfieldstructure.Response -func (r FindFieldStructure) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) +func (r FindFieldStructure) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "text_structure.find_field_structure") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. 
@@ -223,3 +329,268 @@ func (r *FindFieldStructure) Header(key, value string) *FindFieldStructure { return r } + +// ColumnNames If `format` is set to `delimited`, you can specify the column names in a +// comma-separated list. +// If this parameter is not specified, the structure finder uses the column +// names from the header row of the text. +// If the text does not have a header row, columns are named "column1", +// "column2", "column3", for example. +// API name: column_names +func (r *FindFieldStructure) ColumnNames(columnnames string) *FindFieldStructure { + r.values.Set("column_names", columnnames) + + return r +} + +// Delimiter If you have set `format` to `delimited`, you can specify the character used +// to delimit the values in each row. +// Only a single character is supported; the delimiter cannot have multiple +// characters. +// By default, the API considers the following possibilities: comma, tab, +// semi-colon, and pipe (`|`). +// In this default scenario, all rows must have the same number of fields for +// the delimited format to be detected. +// If you specify a delimiter, up to 10% of the rows can have a different number +// of columns than the first row. +// API name: delimiter +func (r *FindFieldStructure) Delimiter(delimiter string) *FindFieldStructure { + r.values.Set("delimiter", delimiter) + + return r +} + +// DocumentsToSample The number of documents to include in the structural analysis. +// The minimum value is 2. +// API name: documents_to_sample +func (r *FindFieldStructure) DocumentsToSample(documentstosample string) *FindFieldStructure { + r.values.Set("documents_to_sample", documentstosample) + + return r +} + +// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns. +// Use this parameter to specify whether to use ECS Grok patterns instead of +// legacy ones when the structure finder creates a Grok pattern. 
+// This setting primarily has an impact when a whole message Grok pattern such +// as `%{CATALINALOG}` matches the input. +// If the structure finder identifies a common structure but has no idea of the +// meaning then generic field names such as `path`, `ipaddress`, `field1`, and +// `field2` are used in the `grok_pattern` output. +// The intention in that situation is that a user who knows the meanings will +// rename the fields before using them. +// API name: ecs_compatibility +func (r *FindFieldStructure) EcsCompatibility(ecscompatibility ecscompatibilitytype.EcsCompatibilityType) *FindFieldStructure { + r.values.Set("ecs_compatibility", ecscompatibility.String()) + + return r +} + +// Explain If `true`, the response includes a field named `explanation`, which is an +// array of strings that indicate how the structure finder produced its result. +// API name: explain +func (r *FindFieldStructure) Explain(explain bool) *FindFieldStructure { + r.values.Set("explain", strconv.FormatBool(explain)) + + return r +} + +// Field The field that should be analyzed. +// API name: field +func (r *FindFieldStructure) Field(field string) *FindFieldStructure { + r.values.Set("field", field) + + return r +} + +// Format The high level structure of the text. +// By default, the API chooses the format. +// In this default scenario, all rows must have the same number of fields for a +// delimited format to be detected. +// If the format is set to delimited and the delimiter is not set, however, the +// API tolerates up to 5% of rows that have a different number of columns than +// the first row. +// API name: format +func (r *FindFieldStructure) Format(format formattype.FormatType) *FindFieldStructure { + r.values.Set("format", format.String()) + + return r +} + +// GrokPattern If the format is `semi_structured_text`, you can specify a Grok pattern that +// is used to extract fields from every message in the text. 
+// The name of the timestamp field in the Grok pattern must match what is +// specified in the `timestamp_field` parameter. +// If that parameter is not specified, the name of the timestamp field in the +// Grok pattern must match "timestamp". +// If `grok_pattern` is not specified, the structure finder creates a Grok +// pattern. +// API name: grok_pattern +func (r *FindFieldStructure) GrokPattern(grokpattern string) *FindFieldStructure { + r.values.Set("grok_pattern", grokpattern) + + return r +} + +// Index The name of the index that contains the analyzed field. +// API name: index +func (r *FindFieldStructure) Index(indexname string) *FindFieldStructure { + r.values.Set("index", indexname) + + return r +} + +// Quote If the format is `delimited`, you can specify the character used to quote the +// values in each row if they contain newlines or the delimiter character. +// Only a single character is supported. +// If this parameter is not specified, the default value is a double quote +// (`"`). +// If your delimited text format does not use quoting, a workaround is to set +// this argument to a character that does not appear anywhere in the sample. +// API name: quote +func (r *FindFieldStructure) Quote(quote string) *FindFieldStructure { + r.values.Set("quote", quote) + + return r +} + +// ShouldTrimFields If the format is `delimited`, you can specify whether values between +// delimiters should have whitespace trimmed from them. +// If this parameter is not specified and the delimiter is pipe (`|`), the +// default value is true. +// Otherwise, the default value is `false`. +// API name: should_trim_fields +func (r *FindFieldStructure) ShouldTrimFields(shouldtrimfields bool) *FindFieldStructure { + r.values.Set("should_trim_fields", strconv.FormatBool(shouldtrimfields)) + + return r +} + +// Timeout The maximum amount of time that the structure analysis can take. +// If the analysis is still running when the timeout expires, it will be +// stopped. 
+// API name: timeout
+func (r *FindFieldStructure) Timeout(duration string) *FindFieldStructure {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// TimestampField The name of the field that contains the primary timestamp of each record in
+// the text.
+// In particular, if the text was ingested into an index, this is the field that
+// would be used to populate the `@timestamp` field.
+//
+// If the format is `semi_structured_text`, this field must match the name of
+// the appropriate extraction in the `grok_pattern`.
+// Therefore, for semi-structured text, it is best not to specify this parameter
+// unless `grok_pattern` is also specified.
+//
+// For structured text, if you specify this parameter, the field must exist
+// within the text.
+//
+// If this parameter is not specified, the structure finder makes a decision
+// about which field (if any) is the primary timestamp field.
+// For structured text, it is not compulsory to have a timestamp in the text.
+// API name: timestamp_field
+func (r *FindFieldStructure) TimestampField(field string) *FindFieldStructure {
+	r.values.Set("timestamp_field", field)
+
+	return r
+}
+
+// TimestampFormat The Java time format of the timestamp field in the text.
+// Only a subset of Java time format letter groups are supported:
+//
+// * `a`
+// * `d`
+// * `dd`
+// * `EEE`
+// * `EEEE`
+// * `H`
+// * `HH`
+// * `h`
+// * `M`
+// * `MM`
+// * `MMM`
+// * `MMMM`
+// * `mm`
+// * `ss`
+// * `XX`
+// * `XXX`
+// * `yy`
+// * `yyyy`
+// * `zzz`
+//
+// Additionally `S` letter groups (fractional seconds) of length one to nine are
+// supported providing they occur after `ss` and are separated from the `ss` by
+// a period (`.`), comma (`,`), or colon (`:`).
+// Spacing and punctuation are also permitted with the exception of a question
+// mark (`?`), newline, and carriage return, together with literal text enclosed
+// in single quotes.
+// For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+//
+// One valuable use case for this parameter is when the format is
+// semi-structured text, there are multiple timestamp formats in the text, and
+// you know which format corresponds to the primary timestamp, but you do not
+// want to specify the full `grok_pattern`.
+// Another is when the timestamp format is one that the structure finder does
+// not consider by default.
+//
+// If this parameter is not specified, the structure finder chooses the best
+// format from a built-in set.
+//
+// If the special value `null` is specified, the structure finder will not look
+// for a primary timestamp in the text.
+// When the format is semi-structured text, this will result in the structure
+// finder treating the text as single-line messages.
+// API name: timestamp_format
+func (r *FindFieldStructure) TimestampFormat(timestampformat string) *FindFieldStructure {
+	r.values.Set("timestamp_format", timestampformat)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *FindFieldStructure) ErrorTrace(errortrace bool) *FindFieldStructure {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *FindFieldStructure) FilterPath(filterpaths ...string) *FindFieldStructure {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human +func (r *FindFieldStructure) Human(human bool) *FindFieldStructure { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *FindFieldStructure) Pretty(pretty bool) *FindFieldStructure { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/textstructure/findfieldstructure/response.go b/typedapi/textstructure/findfieldstructure/response.go new file mode 100644 index 0000000000..97ec3a8735 --- /dev/null +++ b/typedapi/textstructure/findfieldstructure/response.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package findfieldstructure + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ecscompatibilitytype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/formattype" +) + +// Response holds the response body struct for the package findfieldstructure +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/text_structure/find_field_structure/FindFieldStructureResponse.ts#L31-L49 +type Response struct { + Charset string `json:"charset"` + EcsCompatibility *ecscompatibilitytype.EcsCompatibilityType `json:"ecs_compatibility,omitempty"` + FieldStats map[string]types.FieldStat `json:"field_stats"` + Format formattype.FormatType `json:"format"` + GrokPattern *string `json:"grok_pattern,omitempty"` + IngestPipeline types.PipelineConfig `json:"ingest_pipeline"` + JavaTimestampFormats []string `json:"java_timestamp_formats,omitempty"` + JodaTimestampFormats []string `json:"joda_timestamp_formats,omitempty"` + Mappings types.TypeMapping `json:"mappings"` + MultilineStartPattern *string `json:"multiline_start_pattern,omitempty"` + NeedClientTimezone bool `json:"need_client_timezone"` + NumLinesAnalyzed int `json:"num_lines_analyzed"` + NumMessagesAnalyzed int `json:"num_messages_analyzed"` + SampleStart string `json:"sample_start"` + TimestampField *string `json:"timestamp_field,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + FieldStats: make(map[string]types.FieldStat, 0), + } + return r +} diff --git a/typedapi/textstructure/findmessagestructure/find_message_structure.go b/typedapi/textstructure/findmessagestructure/find_message_structure.go index 1c7179f214..bbe927a2a5 100644 --- a/typedapi/textstructure/findmessagestructure/find_message_structure.go +++ 
b/typedapi/textstructure/findmessagestructure/find_message_structure.go @@ -16,22 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Finds the structure of a list of messages. The messages must contain data -// that is suitable to be ingested into Elasticsearch. +// Find the structure of text messages. +// Find the structure of a list of text messages. +// The messages must contain data that is suitable to be ingested into +// Elasticsearch. +// +// This API provides a starting point for ingesting data into Elasticsearch in a +// format that is suitable for subsequent use with other Elastic Stack +// functionality. +// Use this API rather than the find text structure API if your input text has +// already been split up into separate messages by some other process. +// +// The response from the API contains: +// +// * Sample messages. +// * Statistics that reveal the most common values for all fields detected +// within the text and basic numeric statistics for numeric fields. +// * Information about the structure of the text, which is useful when you write +// ingest configurations to index it or similarly formatted text. +// Appropriate mappings for an Elasticsearch index, which you could use to +// ingest the text. +// +// All this information can be calculated by the structure finder with no +// guidance. +// However, you can optionally override some of the decisions about the text +// structure by specifying one or more query parameters. +// +// If the structure finder produces unexpected results, specify the `explain` +// query parameter and an explanation will appear in the response. +// It helps determine why the returned structure was chosen. 
package findmessagestructure import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ecscompatibilitytype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/formattype" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -46,6 +79,10 @@ type FindMessageStructure struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -66,8 +103,35 @@ func NewFindMessageStructureFunc(tp elastictransport.Interface) NewFindMessageSt } } -// Finds the structure of a list of messages. The messages must contain data -// that is suitable to be ingested into Elasticsearch. +// Find the structure of text messages. +// Find the structure of a list of text messages. +// The messages must contain data that is suitable to be ingested into +// Elasticsearch. +// +// This API provides a starting point for ingesting data into Elasticsearch in a +// format that is suitable for subsequent use with other Elastic Stack +// functionality. +// Use this API rather than the find text structure API if your input text has +// already been split up into separate messages by some other process. +// +// The response from the API contains: +// +// * Sample messages. +// * Statistics that reveal the most common values for all fields detected +// within the text and basic numeric statistics for numeric fields. +// * Information about the structure of the text, which is useful when you write +// ingest configurations to index it or similarly formatted text. +// Appropriate mappings for an Elasticsearch index, which you could use to +// ingest the text. 
+// +// All this information can be calculated by the structure finder with no +// guidance. +// However, you can optionally override some of the decisions about the text +// structure by specifying one or more query parameters. +// +// If the structure finder produces unexpected results, specify the `explain` +// query parameter and an explanation will appear in the response. +// It helps determine why the returned structure was chosen. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/find-message-structure.html func New(tp elastictransport.Interface) *FindMessageStructure { @@ -75,6 +139,8 @@ func New(tp elastictransport.Interface) *FindMessageStructure { transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -86,6 +152,21 @@ func New(tp elastictransport.Interface) *FindMessageStructure { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *FindMessageStructure) Raw(raw io.Reader) *FindMessageStructure { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *FindMessageStructure) Request(req *Request) *FindMessageStructure { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. 
func (r *FindMessageStructure) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -95,6 +176,31 @@ func (r *FindMessageStructure) HttpRequest(ctx context.Context) (*http.Request, var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for FindMessageStructure: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -182,13 +288,7 @@ func (r FindMessageStructure) Perform(providedCtx context.Context) (*http.Respon } // Do runs the request through the transport, handle the response and returns a findmessagestructure.Response -func (r FindMessageStructure) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r FindMessageStructure) IsSuccess(providedCtx context.Context) (bool, error) { +func (r FindMessageStructure) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -199,30 +299,46 @@ func (r FindMessageStructure) IsSuccess(providedCtx context.Context) (bool, erro ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the FindMessageStructure query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode } - return false, nil + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the FindMessageStructure headers map. 
@@ -231,3 +347,258 @@ func (r *FindMessageStructure) Header(key, value string) *FindMessageStructure { return r } + +// ColumnNames If the format is `delimited`, you can specify the column names in a +// comma-separated list. +// If this parameter is not specified, the structure finder uses the column +// names from the header row of the text. +// If the text does not have a header role, columns are named "column1", +// "column2", "column3", for example. +// API name: column_names +func (r *FindMessageStructure) ColumnNames(columnnames string) *FindMessageStructure { + r.values.Set("column_names", columnnames) + + return r +} + +// Delimiter If you the format is `delimited`, you can specify the character used to +// delimit the values in each row. +// Only a single character is supported; the delimiter cannot have multiple +// characters. +// By default, the API considers the following possibilities: comma, tab, +// semi-colon, and pipe (`|`). +// In this default scenario, all rows must have the same number of fields for +// the delimited format to be detected. +// If you specify a delimiter, up to 10% of the rows can have a different number +// of columns than the first row. +// API name: delimiter +func (r *FindMessageStructure) Delimiter(delimiter string) *FindMessageStructure { + r.values.Set("delimiter", delimiter) + + return r +} + +// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns. +// Use this parameter to specify whether to use ECS Grok patterns instead of +// legacy ones when the structure finder creates a Grok pattern. +// This setting primarily has an impact when a whole message Grok pattern such +// as `%{CATALINALOG}` matches the input. 
+// If the structure finder identifies a common structure but has no idea of +// meaning then generic field names such as `path`, `ipaddress`, `field1`, and +// `field2` are used in the `grok_pattern` output, with the intention that a +// user who knows the meanings rename these fields before using it. +// API name: ecs_compatibility +func (r *FindMessageStructure) EcsCompatibility(ecscompatibility ecscompatibilitytype.EcsCompatibilityType) *FindMessageStructure { + r.values.Set("ecs_compatibility", ecscompatibility.String()) + + return r +} + +// Explain If this parameter is set to true, the response includes a field named +// `explanation`, which is an array of strings that indicate how the structure +// finder produced its result. +// API name: explain +func (r *FindMessageStructure) Explain(explain bool) *FindMessageStructure { + r.values.Set("explain", strconv.FormatBool(explain)) + + return r +} + +// Format The high level structure of the text. +// By default, the API chooses the format. +// In this default scenario, all rows must have the same number of fields for a +// delimited format to be detected. +// If the format is `delimited` and the delimiter is not set, however, the API +// tolerates up to 5% of rows that have a different number of columns than the +// first row. +// API name: format +func (r *FindMessageStructure) Format(format formattype.FormatType) *FindMessageStructure { + r.values.Set("format", format.String()) + + return r +} + +// GrokPattern If the format is `semi_structured_text`, you can specify a Grok pattern that +// is used to extract fields from every message in the text. +// The name of the timestamp field in the Grok pattern must match what is +// specified in the `timestamp_field` parameter. +// If that parameter is not specified, the name of the timestamp field in the +// Grok pattern must match "timestamp". +// If `grok_pattern` is not specified, the structure finder creates a Grok +// pattern. 
+// API name: grok_pattern +func (r *FindMessageStructure) GrokPattern(grokpattern string) *FindMessageStructure { + r.values.Set("grok_pattern", grokpattern) + + return r +} + +// Quote If the format is `delimited`, you can specify the character used to quote the +// values in each row if they contain newlines or the delimiter character. +// Only a single character is supported. +// If this parameter is not specified, the default value is a double quote +// (`"`). +// If your delimited text format does not use quoting, a workaround is to set +// this argument to a character that does not appear anywhere in the sample. +// API name: quote +func (r *FindMessageStructure) Quote(quote string) *FindMessageStructure { + r.values.Set("quote", quote) + + return r +} + +// ShouldTrimFields If the format is `delimited`, you can specify whether values between +// delimiters should have whitespace trimmed from them. +// If this parameter is not specified and the delimiter is pipe (`|`), the +// default value is true. +// Otherwise, the default value is `false`. +// API name: should_trim_fields +func (r *FindMessageStructure) ShouldTrimFields(shouldtrimfields bool) *FindMessageStructure { + r.values.Set("should_trim_fields", strconv.FormatBool(shouldtrimfields)) + + return r +} + +// Timeout The maximum amount of time that the structure analysis can take. +// If the analysis is still running when the timeout expires, it will be +// stopped. +// API name: timeout +func (r *FindMessageStructure) Timeout(duration string) *FindMessageStructure { + r.values.Set("timeout", duration) + + return r +} + +// TimestampField The name of the field that contains the primary timestamp of each record in +// the text. +// In particular, if the text was ingested into an index, this is the field that +// would be used to populate the `@timestamp` field. +// +// If the format is `semi_structured_text`, this field must match the name of +// the appropriate extraction in the `grok_pattern`. 
+// Therefore, for semi-structured text, it is best not to specify this parameter +// unless `grok_pattern` is also specified. +// +// For structured text, if you specify this parameter, the field must exist +// within the text. +// +// If this parameter is not specified, the structure finder makes a decision +// about which field (if any) is the primary timestamp field. +// For structured text, it is not compulsory to have a timestamp in the text. +// API name: timestamp_field +func (r *FindMessageStructure) TimestampField(field string) *FindMessageStructure { + r.values.Set("timestamp_field", field) + + return r +} + +// TimestampFormat The Java time format of the timestamp field in the text. +// Only a subset of Java time format letter groups are supported: +// +// * `a` +// * `d` +// * `dd` +// * `EEE` +// * `EEEE` +// * `H` +// * `HH` +// * `h` +// * `M` +// * `MM` +// * `MMM` +// * `MMMM` +// * `mm` +// * `ss` +// * `XX` +// * `XXX` +// * `yy` +// * `yyyy` +// * `zzz` +// +// Additionally `S` letter groups (fractional seconds) of length one to nine are +// supported providing they occur after `ss` and are separated from the `ss` by +// a period (`.`), comma (`,`), or colon (`:`). +// Spacing and punctuation is also permitted with the exception a question mark +// (`?`), newline, and carriage return, together with literal text enclosed in +// single quotes. +// For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. +// +// One valuable use case for this parameter is when the format is +// semi-structured text, there are multiple timestamp formats in the text, and +// you know which format corresponds to the primary timestamp, but you do not +// want to specify the full `grok_pattern`. +// Another is when the timestamp format is one that the structure finder does +// not consider by default. +// +// If this parameter is not specified, the structure finder chooses the best +// format from a built-in set. 
+// +// If the special value `null` is specified, the structure finder will not look +// for a primary timestamp in the text. +// When the format is semi-structured text, this will result in the structure +// finder treating the text as single-line messages. +// API name: timestamp_format +func (r *FindMessageStructure) TimestampFormat(timestampformat string) *FindMessageStructure { + r.values.Set("timestamp_format", timestampformat) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *FindMessageStructure) ErrorTrace(errortrace bool) *FindMessageStructure { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *FindMessageStructure) FilterPath(filterpaths ...string) *FindMessageStructure { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *FindMessageStructure) Human(human bool) *FindMessageStructure { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *FindMessageStructure) Pretty(pretty bool) *FindMessageStructure { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The list of messages you want to analyze. 
+// API name: messages +func (r *FindMessageStructure) Messages(messages ...string) *FindMessageStructure { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range messages { + + r.req.Messages = append(r.req.Messages, v) + + } + return r +} diff --git a/typedapi/textstructure/findmessagestructure/request.go b/typedapi/textstructure/findmessagestructure/request.go new file mode 100644 index 0000000000..6ab16830fb --- /dev/null +++ b/typedapi/textstructure/findmessagestructure/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package findmessagestructure + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package findmessagestructure +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/text_structure/find_message_structure/FindMessageStructureRequest.ts#L25-L174 +type Request struct { + + // Messages The list of messages you want to analyze. 
+ Messages []string `json:"messages"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Findmessagestructure request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/textstructure/findmessagestructure/response.go b/typedapi/textstructure/findmessagestructure/response.go new file mode 100644 index 0000000000..ab470998af --- /dev/null +++ b/typedapi/textstructure/findmessagestructure/response.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package findmessagestructure + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ecscompatibilitytype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/formattype" +) + +// Response holds the response body struct for the package findmessagestructure +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/text_structure/find_message_structure/FindMessageStructureResponse.ts#L31-L49 +type Response struct { + Charset string `json:"charset"` + EcsCompatibility *ecscompatibilitytype.EcsCompatibilityType `json:"ecs_compatibility,omitempty"` + FieldStats map[string]types.FieldStat `json:"field_stats"` + Format formattype.FormatType `json:"format"` + GrokPattern *string `json:"grok_pattern,omitempty"` + IngestPipeline types.PipelineConfig `json:"ingest_pipeline"` + JavaTimestampFormats []string `json:"java_timestamp_formats,omitempty"` + JodaTimestampFormats []string `json:"joda_timestamp_formats,omitempty"` + Mappings types.TypeMapping `json:"mappings"` + MultilineStartPattern *string `json:"multiline_start_pattern,omitempty"` + NeedClientTimezone bool `json:"need_client_timezone"` + NumLinesAnalyzed int `json:"num_lines_analyzed"` + NumMessagesAnalyzed int `json:"num_messages_analyzed"` + SampleStart string `json:"sample_start"` + TimestampField *string `json:"timestamp_field,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + FieldStats: make(map[string]types.FieldStat, 0), + } + return r +} diff --git a/typedapi/textstructure/findstructure/find_structure.go b/typedapi/textstructure/findstructure/find_structure.go index 6d45999e3c..886b0e5fe5 100644 --- a/typedapi/textstructure/findstructure/find_structure.go +++ 
b/typedapi/textstructure/findstructure/find_structure.go @@ -16,10 +16,35 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Finds the structure of a text file. The text file must contain data that is -// suitable to be ingested into Elasticsearch. +// Find the structure of a text file. +// The text file must contain data that is suitable to be ingested into +// Elasticsearch. +// +// This API provides a starting point for ingesting data into Elasticsearch in a +// format that is suitable for subsequent use with other Elastic Stack +// functionality. +// Unlike other Elasticsearch endpoints, the data that is posted to this +// endpoint does not need to be UTF-8 encoded and in JSON format. +// It must, however, be text; binary text formats are not currently supported. +// The size is limited to the Elasticsearch HTTP receive buffer size, which +// defaults to 100 Mb. +// +// The response from the API contains: +// +// * A couple of messages from the beginning of the text. +// * Statistics that reveal the most common values for all fields detected +// within the text and basic numeric statistics for numeric fields. +// * Information about the structure of the text, which is useful when you write +// ingest configurations to index it or similarly formatted text. +// * Appropriate mappings for an Elasticsearch index, which you could use to +// ingest the text. +// +// All this information can be calculated by the structure finder with no +// guidance. +// However, you can optionally override some of the decisions about the text +// structure by specifying one or more query parameters. 
package findstructure import ( @@ -74,8 +99,33 @@ func NewFindStructureFunc(tp elastictransport.Interface) NewFindStructure { } } -// Finds the structure of a text file. The text file must contain data that is -// suitable to be ingested into Elasticsearch. +// Find the structure of a text file. +// The text file must contain data that is suitable to be ingested into +// Elasticsearch. +// +// This API provides a starting point for ingesting data into Elasticsearch in a +// format that is suitable for subsequent use with other Elastic Stack +// functionality. +// Unlike other Elasticsearch endpoints, the data that is posted to this +// endpoint does not need to be UTF-8 encoded and in JSON format. +// It must, however, be text; binary text formats are not currently supported. +// The size is limited to the Elasticsearch HTTP receive buffer size, which +// defaults to 100 Mb. +// +// The response from the API contains: +// +// * A couple of messages from the beginning of the text. +// * Statistics that reveal the most common values for all fields detected +// within the text and basic numeric statistics for numeric fields. +// * Information about the structure of the text, which is useful when you write +// ingest configurations to index it or similarly formatted text. +// * Appropriate mappings for an Elasticsearch index, which you could use to +// ingest the text. +// +// All this information can be calculated by the structure finder with no +// guidance. +// However, you can optionally override some of the decisions about the text +// structure by specifying one or more query parameters. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html func New(tp elastictransport.Interface) *FindStructure { @@ -85,8 +135,6 @@ func New(tp elastictransport.Interface) *FindStructure { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -299,9 +347,11 @@ func (r *FindStructure) Header(key, value string) *FindStructure { return r } -// Charset The text’s character set. It must be a character set that is supported by the -// JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE, windows-1252, or -// EUC-JP. If this parameter is not specified, the structure finder chooses an +// Charset The text's character set. +// It must be a character set that is supported by the JVM that Elasticsearch +// uses. +// For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. +// If this parameter is not specified, the structure finder chooses an // appropriate character set. // API name: charset func (r *FindStructure) Charset(charset string) *FindStructure { @@ -310,11 +360,12 @@ func (r *FindStructure) Charset(charset string) *FindStructure { return r } -// ColumnNames If you have set format to delimited, you can specify the column names in a -// comma-separated list. If this parameter is not specified, the structure -// finder uses the column names from the header row of the text. If the text -// does not have a header role, columns are named "column1", "column2", -// "column3", etc. +// ColumnNames If you have set format to `delimited`, you can specify the column names in a +// comma-separated list. +// If this parameter is not specified, the structure finder uses the column +// names from the header row of the text. +// If the text does not have a header role, columns are named "column1", +// "column2", "column3", for example. 
// API name: column_names func (r *FindStructure) ColumnNames(columnnames string) *FindStructure { r.values.Set("column_names", columnnames) @@ -322,13 +373,16 @@ func (r *FindStructure) ColumnNames(columnnames string) *FindStructure { return r } -// Delimiter If you have set format to delimited, you can specify the character used to -// delimit the values in each row. Only a single character is supported; the -// delimiter cannot have multiple characters. By default, the API considers the -// following possibilities: comma, tab, semi-colon, and pipe (|). In this -// default scenario, all rows must have the same number of fields for the -// delimited format to be detected. If you specify a delimiter, up to 10% of the -// rows can have a different number of columns than the first row. +// Delimiter If you have set `format` to `delimited`, you can specify the character used +// to delimit the values in each row. +// Only a single character is supported; the delimiter cannot have multiple +// characters. +// By default, the API considers the following possibilities: comma, tab, +// semi-colon, and pipe (`|`). +// In this default scenario, all rows must have the same number of fields for +// the delimited format to be detected. +// If you specify a delimiter, up to 10% of the rows can have a different number +// of columns than the first row. // API name: delimiter func (r *FindStructure) Delimiter(delimiter string) *FindStructure { r.values.Set("delimiter", delimiter) @@ -336,8 +390,16 @@ func (r *FindStructure) Delimiter(delimiter string) *FindStructure { return r } -// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns (disabled or v1, -// default: disabled). +// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns. +// Use this parameter to specify whether to use ECS Grok patterns instead of +// legacy ones when the structure finder creates a Grok pattern. +// Valid values are `disabled` and `v1`. 
+// This setting primarily has an impact when a whole message Grok pattern such +// as `%{CATALINALOG}` matches the input. +// If the structure finder identifies a common structure but has no idea of +// meaning then generic field names such as `path`, `ipaddress`, `field1`, and +// `field2` are used in the `grok_pattern` output, with the intention that a +// user who knows the meanings rename these fields before using it. // API name: ecs_compatibility func (r *FindStructure) EcsCompatibility(ecscompatibility string) *FindStructure { r.values.Set("ecs_compatibility", ecscompatibility) @@ -345,9 +407,11 @@ func (r *FindStructure) EcsCompatibility(ecscompatibility string) *FindStructure return r } -// Explain If this parameter is set to true, the response includes a field named +// Explain If this parameter is set to `true`, the response includes a field named // explanation, which is an array of strings that indicate how the structure // finder produced its result. +// If the structure finder produces unexpected results for some text, use this +// query parameter to help you determine why the returned structure was chosen. // API name: explain func (r *FindStructure) Explain(explain bool) *FindStructure { r.values.Set("explain", strconv.FormatBool(explain)) @@ -355,12 +419,14 @@ func (r *FindStructure) Explain(explain bool) *FindStructure { return r } -// Format The high level structure of the text. Valid values are ndjson, xml, -// delimited, and semi_structured_text. By default, the API chooses the format. +// Format The high level structure of the text. +// Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. +// By default, the API chooses the format. // In this default scenario, all rows must have the same number of fields for a -// delimited format to be detected. If the format is set to delimited and the -// delimiter is not set, however, the API tolerates up to 5% of rows that have a -// different number of columns than the first row. 
+// delimited format to be detected. +// If the format is set to `delimited` and the delimiter is not set, however, +// the API tolerates up to 5% of rows that have a different number of columns +// than the first row. // API name: format func (r *FindStructure) Format(format string) *FindStructure { r.values.Set("format", format) @@ -368,12 +434,14 @@ func (r *FindStructure) Format(format string) *FindStructure { return r } -// GrokPattern If you have set format to semi_structured_text, you can specify a Grok -// pattern that is used to extract fields from every message in the text. The -// name of the timestamp field in the Grok pattern must match what is specified -// in the timestamp_field parameter. If that parameter is not specified, the -// name of the timestamp field in the Grok pattern must match "timestamp". If -// grok_pattern is not specified, the structure finder creates a Grok pattern. +// GrokPattern If you have set `format` to `semi_structured_text`, you can specify a Grok +// pattern that is used to extract fields from every message in the text. +// The name of the timestamp field in the Grok pattern must match what is +// specified in the `timestamp_field` parameter. +// If that parameter is not specified, the name of the timestamp field in the +// Grok pattern must match "timestamp". +// If `grok_pattern` is not specified, the structure finder creates a Grok +// pattern. // API name: grok_pattern func (r *FindStructure) GrokPattern(grokpattern string) *FindStructure { r.values.Set("grok_pattern", grokpattern) @@ -381,10 +449,10 @@ func (r *FindStructure) GrokPattern(grokpattern string) *FindStructure { return r } -// HasHeaderRow If you have set format to delimited, you can use this parameter to indicate -// whether the column names are in the first row of the text. If this parameter -// is not specified, the structure finder guesses based on the similarity of the -// first row of the text to other rows. 
+// HasHeaderRow If you have set `format` to `delimited`, you can use this parameter to +// indicate whether the column names are in the first row of the text. +// If this parameter is not specified, the structure finder guesses based on the +// similarity of the first row of the text to other rows. // API name: has_header_row func (r *FindStructure) HasHeaderRow(hasheaderrow bool) *FindStructure { r.values.Set("has_header_row", strconv.FormatBool(hasheaderrow)) @@ -393,10 +461,10 @@ func (r *FindStructure) HasHeaderRow(hasheaderrow bool) *FindStructure { } // LineMergeSizeLimit The maximum number of characters in a message when lines are merged to form -// messages while analyzing semi-structured text. If you have extremely long -// messages you may need to increase this, but be aware that this may lead to -// very long processing times if the way to group lines into messages is -// misdetected. +// messages while analyzing semi-structured text. +// If you have extremely long messages you may need to increase this, but be +// aware that this may lead to very long processing times if the way to group +// lines into messages is misdetected. // API name: line_merge_size_limit func (r *FindStructure) LineMergeSizeLimit(linemergesizelimit string) *FindStructure { r.values.Set("line_merge_size_limit", linemergesizelimit) @@ -405,9 +473,20 @@ func (r *FindStructure) LineMergeSizeLimit(linemergesizelimit string) *FindStruc } // LinesToSample The number of lines to include in the structural analysis, starting from the -// beginning of the text. The minimum is 2; If the value of this parameter is -// greater than the number of lines in the text, the analysis proceeds (as long -// as there are at least two lines in the text) for all of the lines. +// beginning of the text. +// The minimum is 2. 
+// If the value of this parameter is greater than the number of lines in the +// text, the analysis proceeds (as long as there are at least two lines in the +// text) for all of the lines. +// +// NOTE: The number of lines and the variation of the lines affects the speed of +// the analysis. +// For example, if you upload text where the first 1000 lines are all variations +// on the same message, the analysis will find more commonality than would be +// seen with a bigger sample. +// If possible, however, it is more efficient to upload sample text with more +// variety in the first 1000 lines than to request analysis of 100000 lines to +// achieve some variety. // API name: lines_to_sample func (r *FindStructure) LinesToSample(linestosample string) *FindStructure { r.values.Set("lines_to_sample", linestosample) @@ -415,12 +494,14 @@ func (r *FindStructure) LinesToSample(linestosample string) *FindStructure { return r } -// Quote If you have set format to delimited, you can specify the character used to -// quote the values in each row if they contain newlines or the delimiter -// character. Only a single character is supported. If this parameter is not -// specified, the default value is a double quote ("). If your delimited text -// format does not use quoting, a workaround is to set this argument to a -// character that does not appear anywhere in the sample. +// Quote If you have set `format` to `delimited`, you can specify the character used +// to quote the values in each row if they contain newlines or the delimiter +// character. +// Only a single character is supported. +// If this parameter is not specified, the default value is a double quote +// (`"`). +// If your delimited text format does not use quoting, a workaround is to set +// this argument to a character that does not appear anywhere in the sample. 
// API name: quote func (r *FindStructure) Quote(quote string) *FindStructure { r.values.Set("quote", quote) @@ -428,10 +509,11 @@ func (r *FindStructure) Quote(quote string) *FindStructure { return r } -// ShouldTrimFields If you have set format to delimited, you can specify whether values between -// delimiters should have whitespace trimmed from them. If this parameter is not -// specified and the delimiter is pipe (|), the default value is true. -// Otherwise, the default value is false. +// ShouldTrimFields If you have set `format` to `delimited`, you can specify whether values +// between delimiters should have whitespace trimmed from them. +// If this parameter is not specified and the delimiter is pipe (`|`), the +// default value is `true`. +// Otherwise, the default value is `false`. // API name: should_trim_fields func (r *FindStructure) ShouldTrimFields(shouldtrimfields bool) *FindStructure { r.values.Set("should_trim_fields", strconv.FormatBool(shouldtrimfields)) @@ -439,8 +521,9 @@ func (r *FindStructure) ShouldTrimFields(shouldtrimfields bool) *FindStructure { return r } -// Timeout Sets the maximum amount of time that the structure analysis make take. If the -// analysis is still running when the timeout expires then it will be aborted. +// Timeout The maximum amount of time that the structure analysis can take. +// If the analysis is still running when the timeout expires then it will be +// stopped. // API name: timeout func (r *FindStructure) Timeout(duration string) *FindStructure { r.values.Set("timeout", duration) @@ -448,7 +531,22 @@ func (r *FindStructure) Timeout(duration string) *FindStructure { return r } -// TimestampField Optional parameter to specify the timestamp field in the file +// TimestampField The name of the field that contains the primary timestamp of each record in +// the text. +// In particular, if the text were ingested into an index, this is the field +// that would be used to populate the `@timestamp` field. 
+// +// If the `format` is `semi_structured_text`, this field must match the name of +// the appropriate extraction in the `grok_pattern`. +// Therefore, for semi-structured text, it is best not to specify this parameter +// unless `grok_pattern` is also specified. +// +// For structured text, if you specify this parameter, the field must exist +// within the text. +// +// If this parameter is not specified, the structure finder makes a decision +// about which field (if any) is the primary timestamp field. +// For structured text, it is not compulsory to have a timestamp in the text. // API name: timestamp_field func (r *FindStructure) TimestampField(field string) *FindStructure { r.values.Set("timestamp_field", field) @@ -457,6 +555,50 @@ func (r *FindStructure) TimestampField(field string) *FindStructure { } // TimestampFormat The Java time format of the timestamp field in the text. +// +// Only a subset of Java time format letter groups are supported: +// +// * `a` +// * `d` +// * `dd` +// * `EEE` +// * `EEEE` +// * `H` +// * `HH` +// * `h` +// * `M` +// * `MM` +// * `MMM` +// * `MMMM` +// * `mm` +// * `ss` +// * `XX` +// * `XXX` +// * `yy` +// * `yyyy` +// * `zzz` +// +// Additionally `S` letter groups (fractional seconds) of length one to nine are +// supported providing they occur after `ss` and separated from the `ss` by a +// `.`, `,` or `:`. +// Spacing and punctuation is also permitted with the exception of `?`, newline +// and carriage return, together with literal text enclosed in single quotes. +// For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. +// +// One valuable use case for this parameter is when the format is +// semi-structured text, there are multiple timestamp formats in the text, and +// you know which format corresponds to the primary timestamp, but you do not +// want to specify the full `grok_pattern`. +// Another is when the timestamp format is one that the structure finder does +// not consider by default. 
+// +// If this parameter is not specified, the structure finder chooses the best +// format from a built-in set. +// +// If the special value `null` is specified the structure finder will not look +// for a primary timestamp in the text. +// When the format is semi-structured text this will result in the structure +// finder treating the text as single-line messages. // API name: timestamp_format func (r *FindStructure) TimestampFormat(timestampformat string) *FindStructure { r.values.Set("timestamp_format", timestampformat) diff --git a/typedapi/textstructure/findstructure/request.go b/typedapi/textstructure/findstructure/request.go index 39469f772d..b1fc50a4f8 100644 --- a/typedapi/textstructure/findstructure/request.go +++ b/typedapi/textstructure/findstructure/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package findstructure @@ -26,7 +26,7 @@ import ( // Request holds the request body struct for the package findstructure // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/text_structure/find_structure/FindStructureRequest.ts#L24-L75 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/text_structure/find_structure/FindStructureRequest.ts#L24-L207 type Request = []json.RawMessage // NewRequest returns a Request diff --git a/typedapi/textstructure/findstructure/response.go b/typedapi/textstructure/findstructure/response.go index 52ea08af89..fb3fc31850 100644 --- a/typedapi/textstructure/findstructure/response.go +++ b/typedapi/textstructure/findstructure/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package findstructure @@ -26,30 +26,58 @@ import ( // Response holds the response body struct for the package findstructure // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/text_structure/find_structure/FindStructureResponse.ts#L27-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/text_structure/find_structure/FindStructureResponse.ts#L27-L97 type Response struct { - Charset string `json:"charset"` - ColumnNames []string `json:"column_names,omitempty"` - Delimiter *string `json:"delimiter,omitempty"` - ExcludeLinesPattern *string `json:"exclude_lines_pattern,omitempty"` - Explanation []string `json:"explanation,omitempty"` - FieldStats map[string]types.FieldStat `json:"field_stats"` - Format string `json:"format"` - GrokPattern *string `json:"grok_pattern,omitempty"` - HasByteOrderMarker bool `json:"has_byte_order_marker"` - HasHeaderRow *bool `json:"has_header_row,omitempty"` - IngestPipeline types.PipelineConfig `json:"ingest_pipeline"` - JavaTimestampFormats []string `json:"java_timestamp_formats,omitempty"` - JodaTimestampFormats []string `json:"joda_timestamp_formats,omitempty"` - Mappings types.TypeMapping `json:"mappings"` - MultilineStartPattern *string `json:"multiline_start_pattern,omitempty"` - NeedClientTimezone bool `json:"need_client_timezone"` - NumLinesAnalyzed int `json:"num_lines_analyzed"` - NumMessagesAnalyzed int `json:"num_messages_analyzed"` - Quote *string `json:"quote,omitempty"` - SampleStart string `json:"sample_start"` - ShouldTrimFields *bool `json:"should_trim_fields,omitempty"` - TimestampField *string 
`json:"timestamp_field,omitempty"` + + // Charset The character encoding used to parse the text. + Charset string `json:"charset"` + // ColumnNames If `format` is `delimited`, the `column_names` field lists the column names + // in the order they appear in the sample. + ColumnNames []string `json:"column_names,omitempty"` + Delimiter *string `json:"delimiter,omitempty"` + ExcludeLinesPattern *string `json:"exclude_lines_pattern,omitempty"` + Explanation []string `json:"explanation,omitempty"` + // FieldStats The most common values of each field, plus basic numeric statistics for the + // numeric `page_count` field. + // This information may provide clues that the data needs to be cleaned or + // transformed prior to use by other Elastic Stack functionality. + FieldStats map[string]types.FieldStat `json:"field_stats"` + // Format Valid values include `ndjson`, `xml`, `delimited`, and + // `semi_structured_text`. + Format string `json:"format"` + GrokPattern *string `json:"grok_pattern,omitempty"` + // HasByteOrderMarker For UTF character encodings, it indicates whether the text begins with a byte + // order marker. + HasByteOrderMarker bool `json:"has_byte_order_marker"` + HasHeaderRow *bool `json:"has_header_row,omitempty"` + IngestPipeline types.PipelineConfig `json:"ingest_pipeline"` + // JavaTimestampFormats The Java time formats recognized in the time fields. + // Elasticsearch mappings and ingest pipelines use this format. + JavaTimestampFormats []string `json:"java_timestamp_formats,omitempty"` + // JodaTimestampFormats Information that is used to tell Logstash how to parse timestamps. + JodaTimestampFormats []string `json:"joda_timestamp_formats,omitempty"` + // Mappings Some suitable mappings for an index into which the data could be ingested. 
+ Mappings types.TypeMapping `json:"mappings"` + MultilineStartPattern *string `json:"multiline_start_pattern,omitempty"` + // NeedClientTimezone If a timestamp format is detected that does not include a timezone, + // `need_client_timezone` is `true`. + // The server that parses the text must therefore be told the correct timezone + // by the client. + NeedClientTimezone bool `json:"need_client_timezone"` + // NumLinesAnalyzed The number of lines of the text that were analyzed. + NumLinesAnalyzed int `json:"num_lines_analyzed"` + // NumMessagesAnalyzed The number of distinct messages the lines contained. + // For NDJSON, this value is the same as `num_lines_analyzed`. + // For other text formats, messages can span several lines. + NumMessagesAnalyzed int `json:"num_messages_analyzed"` + Quote *string `json:"quote,omitempty"` + // SampleStart The first two messages in the text verbatim. + // This may help diagnose parse errors or accidental uploads of the wrong text. + SampleStart string `json:"sample_start"` + ShouldTrimFields *bool `json:"should_trim_fields,omitempty"` + // TimestampField The field considered most likely to be the primary timestamp of each + // document. + TimestampField *string `json:"timestamp_field,omitempty"` } // NewResponse returns a Response diff --git a/typedapi/textstructure/testgrokpattern/request.go b/typedapi/textstructure/testgrokpattern/request.go index d2d9218482..38495a5ff3 100644 --- a/typedapi/textstructure/testgrokpattern/request.go +++ b/typedapi/textstructure/testgrokpattern/request.go @@ -16,23 +16,26 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package testgrokpattern import ( + "bytes" "encoding/json" + "errors" "fmt" + "io" ) // Request holds the request body struct for the package testgrokpattern // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/text_structure/test_grok_pattern/TestGrokPatternRequest.ts#L22-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/text_structure/test_grok_pattern/TestGrokPatternRequest.ts#L23-L59 type Request struct { - // GrokPattern Grok pattern to run on the text. + // GrokPattern The Grok pattern to run on the text. GrokPattern string `json:"grok_pattern"` - // Text Lines of text to run the Grok pattern on. + // Text The lines of text to run the Grok pattern on. Text []string `json:"text"` } @@ -54,3 +57,32 @@ func (r *Request) FromJSON(data string) (*Request, error) { return &req, nil } + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "grok_pattern": + if err := dec.Decode(&s.GrokPattern); err != nil { + return fmt.Errorf("%s | %w", "GrokPattern", err) + } + + case "text": + if err := dec.Decode(&s.Text); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + + } + } + return nil +} diff --git a/typedapi/textstructure/testgrokpattern/response.go b/typedapi/textstructure/testgrokpattern/response.go index 331ac38099..777fc95cbd 100644 --- a/typedapi/textstructure/testgrokpattern/response.go +++ b/typedapi/textstructure/testgrokpattern/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package testgrokpattern @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package testgrokpattern // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/text_structure/test_grok_pattern/TestGrokPatternResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/text_structure/test_grok_pattern/TestGrokPatternResponse.ts#L22-L26 type Response struct { Matches []types.MatchedText `json:"matches"` } diff --git a/typedapi/textstructure/testgrokpattern/test_grok_pattern.go b/typedapi/textstructure/testgrokpattern/test_grok_pattern.go index 34dbe31855..ee14eefbc1 100644 --- a/typedapi/textstructure/testgrokpattern/test_grok_pattern.go +++ b/typedapi/textstructure/testgrokpattern/test_grok_pattern.go @@ -16,9 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Tests a Grok pattern on some text. +// Test a Grok pattern. +// Test a Grok pattern on one or more lines of text. +// The API indicates whether the lines match the pattern together with the +// offsets and lengths of the matched substrings. package testgrokpattern import ( @@ -73,7 +76,10 @@ func NewTestGrokPatternFunc(tp elastictransport.Interface) NewTestGrokPattern { } } -// Tests a Grok pattern on some text. +// Test a Grok pattern. +// Test a Grok pattern on one or more lines of text. 
+// The API indicates whether the lines match the pattern together with the +// offsets and lengths of the matched substrings. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/test-grok-pattern.html func New(tp elastictransport.Interface) *TestGrokPattern { @@ -83,8 +89,6 @@ func New(tp elastictransport.Interface) *TestGrokPattern { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -292,8 +296,10 @@ func (r *TestGrokPattern) Header(key, value string) *TestGrokPattern { return r } -// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns (disabled or v1, -// default: disabled). +// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns. +// Use this parameter to specify whether to use ECS Grok patterns instead of +// legacy ones when the structure finder creates a Grok pattern. +// Valid values are `disabled` and `v1`. // API name: ecs_compatibility func (r *TestGrokPattern) EcsCompatibility(ecscompatibility string) *TestGrokPattern { r.values.Set("ecs_compatibility", ecscompatibility) @@ -345,19 +351,30 @@ func (r *TestGrokPattern) Pretty(pretty bool) *TestGrokPattern { return r } -// GrokPattern Grok pattern to run on the text. +// The Grok pattern to run on the text. // API name: grok_pattern func (r *TestGrokPattern) GrokPattern(grokpattern string) *TestGrokPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.GrokPattern = grokpattern return r } -// Text Lines of text to run the Grok pattern on. +// The lines of text to run the Grok pattern on. 
// API name: text func (r *TestGrokPattern) Text(texts ...string) *TestGrokPattern { - r.req.Text = texts + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range texts { + r.req.Text = append(r.req.Text, v) + + } return r } diff --git a/typedapi/transform/deletetransform/delete_transform.go b/typedapi/transform/deletetransform/delete_transform.go index 045e80ad4d..c4e7829524 100644 --- a/typedapi/transform/deletetransform/delete_transform.go +++ b/typedapi/transform/deletetransform/delete_transform.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Delete a transform. -// Deletes a transform. package deletetransform import ( @@ -78,7 +77,6 @@ func NewDeleteTransformFunc(tp elastictransport.Interface) NewDeleteTransform { } // Delete a transform. -// Deletes a transform. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html func New(tp elastictransport.Interface) *DeleteTransform { diff --git a/typedapi/transform/deletetransform/response.go b/typedapi/transform/deletetransform/response.go index 3af17fb011..297e92bce6 100644 --- a/typedapi/transform/deletetransform/response.go +++ b/typedapi/transform/deletetransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletetransform // Response holds the response body struct for the package deletetransform // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/delete_transform/DeleteTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/delete_transform/DeleteTransformResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/transform/getnodestats/get_node_stats.go b/typedapi/transform/getnodestats/get_node_stats.go index bf9ae5aede..1bf3cd8ff7 100644 --- a/typedapi/transform/getnodestats/get_node_stats.go +++ b/typedapi/transform/getnodestats/get_node_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Retrieves transform usage information for transform nodes. package getnodestats diff --git a/typedapi/transform/gettransform/get_transform.go b/typedapi/transform/gettransform/get_transform.go index 2cbbeea477..1ff67f2f1f 100644 --- a/typedapi/transform/gettransform/get_transform.go +++ b/typedapi/transform/gettransform/get_transform.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get transforms. -// Retrieves configuration information for transforms. +// Get configuration information for transforms. package gettransform import ( @@ -76,7 +76,7 @@ func NewGetTransformFunc(tp elastictransport.Interface) NewGetTransform { } // Get transforms. -// Retrieves configuration information for transforms. +// Get configuration information for transforms. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html func New(tp elastictransport.Interface) *GetTransform { diff --git a/typedapi/transform/gettransform/response.go b/typedapi/transform/gettransform/response.go index d21ce9ac05..460e3e12e7 100644 --- a/typedapi/transform/gettransform/response.go +++ b/typedapi/transform/gettransform/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package gettransform @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettransform // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/get_transform/GetTransformResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/get_transform/GetTransformResponse.ts#L23-L25 type Response struct { Count int64 `json:"count"` Transforms []types.TransformSummary `json:"transforms"` diff --git a/typedapi/transform/gettransformstats/get_transform_stats.go b/typedapi/transform/gettransformstats/get_transform_stats.go index 5e83fb3f5e..6b7e174cbf 100644 --- a/typedapi/transform/gettransformstats/get_transform_stats.go +++ b/typedapi/transform/gettransformstats/get_transform_stats.go @@ -16,10 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Get transform stats. -// Retrieves usage information for transforms. +// +// Get usage information for transforms. package gettransformstats import ( @@ -78,7 +79,8 @@ func NewGetTransformStatsFunc(tp elastictransport.Interface) NewGetTransformStat } // Get transform stats. -// Retrieves usage information for transforms. +// +// Get usage information for transforms. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html func New(tp elastictransport.Interface) *GetTransformStats { diff --git a/typedapi/transform/gettransformstats/response.go b/typedapi/transform/gettransformstats/response.go index 8fa8257200..783933e58c 100644 --- a/typedapi/transform/gettransformstats/response.go +++ b/typedapi/transform/gettransformstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package gettransformstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettransformstats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/get_transform_stats/GetTransformStatsResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/get_transform_stats/GetTransformStatsResponse.ts#L23-L25 type Response struct { Count int64 `json:"count"` Transforms []types.TransformStats `json:"transforms"` diff --git a/typedapi/transform/previewtransform/preview_transform.go b/typedapi/transform/previewtransform/preview_transform.go index af0365da8a..5c4f08ea9a 100644 --- a/typedapi/transform/previewtransform/preview_transform.go +++ b/typedapi/transform/previewtransform/preview_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Preview a transform. 
// Generates a preview of the results that you will get when you create a @@ -105,8 +105,6 @@ func New(tp elastictransport.Interface) *PreviewTransform { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -391,89 +389,126 @@ func (r *PreviewTransform) Pretty(pretty bool) *PreviewTransform { return r } -// Description Free text description of the transform. +// Free text description of the transform. // API name: description func (r *PreviewTransform) Description(description string) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Dest The destination for the transform. +// The destination for the transform. // API name: dest -func (r *PreviewTransform) Dest(dest *types.TransformDestination) *PreviewTransform { +func (r *PreviewTransform) Dest(dest types.TransformDestinationVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Dest = dest + r.req.Dest = dest.TransformDestinationCaster() return r } -// Frequency The interval between checks for changes in the source indices when the +// The interval between checks for changes in the source indices when the // transform is running continuously. Also determines the retry interval in // the event of transient failures while the transform is searching or // indexing. The minimum value is 1s and the maximum is 1h. 
// API name: frequency -func (r *PreviewTransform) Frequency(duration types.Duration) *PreviewTransform { - r.req.Frequency = duration +func (r *PreviewTransform) Frequency(duration types.DurationVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() return r } -// Latest The latest method transforms the data by finding the latest document for +// The latest method transforms the data by finding the latest document for // each unique key. // API name: latest -func (r *PreviewTransform) Latest(latest *types.Latest) *PreviewTransform { +func (r *PreviewTransform) Latest(latest types.LatestVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Latest = latest + r.req.Latest = latest.LatestCaster() return r } -// Pivot The pivot method transforms the data by aggregating and grouping it. +// The pivot method transforms the data by aggregating and grouping it. // These objects define the group by fields and the aggregation to reduce // the data. // API name: pivot -func (r *PreviewTransform) Pivot(pivot *types.Pivot) *PreviewTransform { +func (r *PreviewTransform) Pivot(pivot types.PivotVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pivot = pivot + r.req.Pivot = pivot.PivotCaster() return r } -// RetentionPolicy Defines a retention policy for the transform. Data that meets the defined +// Defines a retention policy for the transform. Data that meets the defined // criteria is deleted from the destination index. 
// API name: retention_policy -func (r *PreviewTransform) RetentionPolicy(retentionpolicy *types.RetentionPolicyContainer) *PreviewTransform { +func (r *PreviewTransform) RetentionPolicy(retentionpolicy types.RetentionPolicyContainerVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.RetentionPolicy = retentionpolicy + r.req.RetentionPolicy = retentionpolicy.RetentionPolicyContainerCaster() return r } -// Settings Defines optional transform settings. +// Defines optional transform settings. // API name: settings -func (r *PreviewTransform) Settings(settings *types.Settings) *PreviewTransform { +func (r *PreviewTransform) Settings(settings types.SettingsVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Settings = settings + r.req.Settings = settings.SettingsCaster() return r } -// Source The source of the data for the transform. +// The source of the data for the transform. // API name: source -func (r *PreviewTransform) Source(source *types.TransformSource) *PreviewTransform { +func (r *PreviewTransform) Source(source types.TransformSourceVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source = source + r.req.Source = source.TransformSourceCaster() return r } -// Sync Defines the properties transforms require to run continuously. +// Defines the properties transforms require to run continuously. 
// API name: sync -func (r *PreviewTransform) Sync(sync *types.SyncContainer) *PreviewTransform { +func (r *PreviewTransform) Sync(sync types.SyncContainerVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Sync = sync + r.req.Sync = sync.SyncContainerCaster() return r } diff --git a/typedapi/transform/previewtransform/request.go b/typedapi/transform/previewtransform/request.go index 6977264a96..1b9a9deafe 100644 --- a/typedapi/transform/previewtransform/request.go +++ b/typedapi/transform/previewtransform/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package previewtransform @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package previewtransform // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/preview_transform/PreviewTransformRequest.ts#L33-L108 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/preview_transform/PreviewTransformRequest.ts#L33-L119 type Request struct { // Description Free text description of the transform. diff --git a/typedapi/transform/previewtransform/response.go b/typedapi/transform/previewtransform/response.go index a3dff573b7..0506a215b8 100644 --- a/typedapi/transform/previewtransform/response.go +++ b/typedapi/transform/previewtransform/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package previewtransform @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package previewtransform // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/preview_transform/PreviewTransformResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/preview_transform/PreviewTransformResponse.ts#L22-L27 type Response struct { GeneratedDestIndex types.IndexState `json:"generated_dest_index"` Preview []json.RawMessage `json:"preview"` diff --git a/typedapi/transform/puttransform/put_transform.go b/typedapi/transform/puttransform/put_transform.go index 4f1d8cac18..29c04c894e 100644 --- a/typedapi/transform/puttransform/put_transform.go +++ b/typedapi/transform/puttransform/put_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Create a transform. // Creates a transform. @@ -159,8 +159,6 @@ func New(tp elastictransport.Interface) *PutTransform { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -453,99 +451,141 @@ func (r *PutTransform) Pretty(pretty bool) *PutTransform { return r } -// Description Free text description of the transform. +// Free text description of the transform. 
// API name: description func (r *PutTransform) Description(description string) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Dest The destination for the transform. +// The destination for the transform. // API name: dest -func (r *PutTransform) Dest(dest *types.TransformDestination) *PutTransform { +func (r *PutTransform) Dest(dest types.TransformDestinationVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Dest = *dest + r.req.Dest = *dest.TransformDestinationCaster() return r } -// Frequency The interval between checks for changes in the source indices when the +// The interval between checks for changes in the source indices when the // transform is running continuously. Also // determines the retry interval in the event of transient failures while the // transform is searching or indexing. // The minimum value is `1s` and the maximum is `1h`. // API name: frequency -func (r *PutTransform) Frequency(duration types.Duration) *PutTransform { - r.req.Frequency = duration +func (r *PutTransform) Frequency(duration types.DurationVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() return r } -// Latest The latest method transforms the data by finding the latest document for each +// The latest method transforms the data by finding the latest document for each // unique key. 
// API name: latest -func (r *PutTransform) Latest(latest *types.Latest) *PutTransform { +func (r *PutTransform) Latest(latest types.LatestVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Latest = latest + r.req.Latest = latest.LatestCaster() return r } -// Meta_ Defines optional transform metadata. +// Defines optional transform metadata. // API name: _meta -func (r *PutTransform) Meta_(metadata types.Metadata) *PutTransform { - r.req.Meta_ = metadata +func (r *PutTransform) Meta_(metadata types.MetadataVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// Pivot The pivot method transforms the data by aggregating and grouping it. These +// The pivot method transforms the data by aggregating and grouping it. These // objects define the group by fields // and the aggregation to reduce the data. // API name: pivot -func (r *PutTransform) Pivot(pivot *types.Pivot) *PutTransform { +func (r *PutTransform) Pivot(pivot types.PivotVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Pivot = pivot + r.req.Pivot = pivot.PivotCaster() return r } -// RetentionPolicy Defines a retention policy for the transform. Data that meets the defined +// Defines a retention policy for the transform. Data that meets the defined // criteria is deleted from the // destination index. 
// API name: retention_policy -func (r *PutTransform) RetentionPolicy(retentionpolicy *types.RetentionPolicyContainer) *PutTransform { +func (r *PutTransform) RetentionPolicy(retentionpolicy types.RetentionPolicyContainerVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.RetentionPolicy = retentionpolicy + r.req.RetentionPolicy = retentionpolicy.RetentionPolicyContainerCaster() return r } -// Settings Defines optional transform settings. +// Defines optional transform settings. // API name: settings -func (r *PutTransform) Settings(settings *types.Settings) *PutTransform { +func (r *PutTransform) Settings(settings types.SettingsVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Settings = settings + r.req.Settings = settings.SettingsCaster() return r } -// Source The source of the data for the transform. +// The source of the data for the transform. // API name: source -func (r *PutTransform) Source(source *types.TransformSource) *PutTransform { +func (r *PutTransform) Source(source types.TransformSourceVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source = *source + r.req.Source = *source.TransformSourceCaster() return r } -// Sync Defines the properties transforms require to run continuously. +// Defines the properties transforms require to run continuously. 
// API name: sync -func (r *PutTransform) Sync(sync *types.SyncContainer) *PutTransform { +func (r *PutTransform) Sync(sync types.SyncContainerVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Sync = sync + r.req.Sync = sync.SyncContainerCaster() return r } diff --git a/typedapi/transform/puttransform/request.go b/typedapi/transform/puttransform/request.go index 0ef25f8124..b318f5dfe2 100644 --- a/typedapi/transform/puttransform/request.go +++ b/typedapi/transform/puttransform/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package puttransform @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package puttransform // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/put_transform/PutTransformRequest.ts#L33-L123 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/put_transform/PutTransformRequest.ts#L33-L130 type Request struct { // Description Free text description of the transform. diff --git a/typedapi/transform/puttransform/response.go b/typedapi/transform/puttransform/response.go index 8905e79d78..6bd9b97dc0 100644 --- a/typedapi/transform/puttransform/response.go +++ b/typedapi/transform/puttransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package puttransform // Response holds the response body struct for the package puttransform // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/put_transform/PutTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/put_transform/PutTransformResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/transform/resettransform/reset_transform.go b/typedapi/transform/resettransform/reset_transform.go index 774662fb52..ba6038a3d8 100644 --- a/typedapi/transform/resettransform/reset_transform.go +++ b/typedapi/transform/resettransform/reset_transform.go @@ -16,10 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Reset a transform. -// Resets a transform. +// // Before you can reset it, you must stop it; alternatively, use the `force` // query parameter. // If the destination index was created by the transform, it is deleted. @@ -81,7 +81,7 @@ func NewResetTransformFunc(tp elastictransport.Interface) NewResetTransform { } // Reset a transform. -// Resets a transform. +// // Before you can reset it, you must stop it; alternatively, use the `force` // query parameter. // If the destination index was created by the transform, it is deleted. 
@@ -320,6 +320,15 @@ func (r *ResetTransform) Force(force bool) *ResetTransform { return r } +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *ResetTransform) Timeout(duration string) *ResetTransform { + r.values.Set("timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/transform/resettransform/response.go b/typedapi/transform/resettransform/response.go index cce915f475..8de5a7d588 100644 --- a/typedapi/transform/resettransform/response.go +++ b/typedapi/transform/resettransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package resettransform // Response holds the response body struct for the package resettransform // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/reset_transform/ResetTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/reset_transform/ResetTransformResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/transform/schedulenowtransform/response.go b/typedapi/transform/schedulenowtransform/response.go index a1bc30bc0c..41dcf9996f 100644 --- a/typedapi/transform/schedulenowtransform/response.go +++ b/typedapi/transform/schedulenowtransform/response.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package schedulenowtransform // Response holds the response body struct for the package schedulenowtransform // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/schedule_now_transform/ScheduleNowTransformResponse.ts#L21-L23 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/schedule_now_transform/ScheduleNowTransformResponse.ts#L21-L23 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/transform/schedulenowtransform/schedule_now_transform.go b/typedapi/transform/schedulenowtransform/schedule_now_transform.go index b0306cb016..b6b42a3831 100644 --- a/typedapi/transform/schedulenowtransform/schedule_now_transform.go +++ b/typedapi/transform/schedulenowtransform/schedule_now_transform.go @@ -16,16 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Schedule a transform to start now. -// Instantly runs a transform to process data. // -// If you _schedule_now a transform, it will process the new data instantly, -// without waiting for the configured frequency interval. After _schedule_now -// API is called, -// the transform will be processed again at now + frequency unless _schedule_now -// API +// Instantly run a transform to process data. 
+// If you run this API, the transform will process the new data instantly, +// without waiting for the configured frequency interval. After the API is +// called, +// the transform will be processed again at `now + frequency` unless the API // is called again in the meantime. package schedulenowtransform @@ -85,13 +84,12 @@ func NewScheduleNowTransformFunc(tp elastictransport.Interface) NewScheduleNowTr } // Schedule a transform to start now. -// Instantly runs a transform to process data. // -// If you _schedule_now a transform, it will process the new data instantly, -// without waiting for the configured frequency interval. After _schedule_now -// API is called, -// the transform will be processed again at now + frequency unless _schedule_now -// API +// Instantly run a transform to process data. +// If you run this API, the transform will process the new data instantly, +// without waiting for the configured frequency interval. After the API is +// called, +// the transform will be processed again at `now + frequency` unless the API // is called again in the meantime. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/schedule-now-transform.html diff --git a/typedapi/transform/starttransform/response.go b/typedapi/transform/starttransform/response.go index 547d88ae1a..6213a38d94 100644 --- a/typedapi/transform/starttransform/response.go +++ b/typedapi/transform/starttransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package starttransform // Response holds the response body struct for the package starttransform // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/start_transform/StartTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/start_transform/StartTransformResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/transform/starttransform/start_transform.go b/typedapi/transform/starttransform/start_transform.go index 56178341a8..1043dce4e8 100644 --- a/typedapi/transform/starttransform/start_transform.go +++ b/typedapi/transform/starttransform/start_transform.go @@ -16,10 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Start a transform. -// Starts a transform. // // When you start a transform, it creates the destination index if it does not // already exist. The `number_of_shards` is @@ -105,7 +104,6 @@ func NewStartTransformFunc(tp elastictransport.Interface) NewStartTransform { } // Start a transform. -// Starts a transform. // // When you start a transform, it creates the destination index if it does not // already exist. 
The `number_of_shards` is diff --git a/typedapi/transform/stoptransform/response.go b/typedapi/transform/stoptransform/response.go index 148cd66cc4..355368f05f 100644 --- a/typedapi/transform/stoptransform/response.go +++ b/typedapi/transform/stoptransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stoptransform // Response holds the response body struct for the package stoptransform // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/stop_transform/StopTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/stop_transform/StopTransformResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/transform/stoptransform/stop_transform.go b/typedapi/transform/stoptransform/stop_transform.go index a6a22491cb..ea75206406 100644 --- a/typedapi/transform/stoptransform/stop_transform.go +++ b/typedapi/transform/stoptransform/stop_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Stop transforms. // Stops one or more transforms. 
diff --git a/typedapi/transform/updatetransform/request.go b/typedapi/transform/updatetransform/request.go index fd39845ef2..a7f4929745 100644 --- a/typedapi/transform/updatetransform/request.go +++ b/typedapi/transform/updatetransform/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatetransform @@ -33,7 +33,7 @@ import ( // Request holds the request body struct for the package updatetransform // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/update_transform/UpdateTransformRequest.ts#L31-L106 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/update_transform/UpdateTransformRequest.ts#L31-L113 type Request struct { // Description Free text description of the transform. diff --git a/typedapi/transform/updatetransform/response.go b/typedapi/transform/updatetransform/response.go index 4bf3ff6e3f..e214d453c2 100644 --- a/typedapi/transform/updatetransform/response.go +++ b/typedapi/transform/updatetransform/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package updatetransform @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatetransform // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/update_transform/UpdateTransformResponse.ts#L33-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/update_transform/UpdateTransformResponse.ts#L33-L51 type Response struct { Authorization *types.TransformAuthorization `json:"authorization,omitempty"` CreateTime int64 `json:"create_time"` diff --git a/typedapi/transform/updatetransform/update_transform.go b/typedapi/transform/updatetransform/update_transform.go index 272c4fd3b1..59a7fcc68e 100644 --- a/typedapi/transform/updatetransform/update_transform.go +++ b/typedapi/transform/updatetransform/update_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Update a transform. // Updates certain properties of a transform. @@ -113,8 +113,6 @@ func New(tp elastictransport.Interface) *UpdateTransform { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -400,75 +398,110 @@ func (r *UpdateTransform) Pretty(pretty bool) *UpdateTransform { return r } -// Description Free text description of the transform. +// Free text description of the transform. 
// API name: description func (r *UpdateTransform) Description(description string) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Description = &description return r } -// Dest The destination for the transform. +// The destination for the transform. // API name: dest -func (r *UpdateTransform) Dest(dest *types.TransformDestination) *UpdateTransform { +func (r *UpdateTransform) Dest(dest types.TransformDestinationVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Dest = dest + r.req.Dest = dest.TransformDestinationCaster() return r } -// Frequency The interval between checks for changes in the source indices when the +// The interval between checks for changes in the source indices when the // transform is running continuously. Also determines the retry interval in // the event of transient failures while the transform is searching or // indexing. The minimum value is 1s and the maximum is 1h. // API name: frequency -func (r *UpdateTransform) Frequency(duration types.Duration) *UpdateTransform { - r.req.Frequency = duration +func (r *UpdateTransform) Frequency(duration types.DurationVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() return r } -// Meta_ Defines optional transform metadata. +// Defines optional transform metadata. // API name: _meta -func (r *UpdateTransform) Meta_(metadata types.Metadata) *UpdateTransform { - r.req.Meta_ = metadata +func (r *UpdateTransform) Meta_(metadata types.MetadataVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() return r } -// RetentionPolicy Defines a retention policy for the transform. 
Data that meets the defined +// Defines a retention policy for the transform. Data that meets the defined // criteria is deleted from the destination index. // API name: retention_policy -func (r *UpdateTransform) RetentionPolicy(retentionpolicy types.RetentionPolicyContainer) *UpdateTransform { - r.req.RetentionPolicy = &retentionpolicy +func (r *UpdateTransform) RetentionPolicy(retentionpolicy types.RetentionPolicyContainerVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RetentionPolicy = retentionpolicy.RetentionPolicyContainerCaster() return r } -// Settings Defines optional transform settings. +// Defines optional transform settings. // API name: settings -func (r *UpdateTransform) Settings(settings *types.Settings) *UpdateTransform { +func (r *UpdateTransform) Settings(settings types.SettingsVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Settings = settings + r.req.Settings = settings.SettingsCaster() return r } -// Source The source of the data for the transform. +// The source of the data for the transform. // API name: source -func (r *UpdateTransform) Source(source *types.TransformSource) *UpdateTransform { +func (r *UpdateTransform) Source(source types.TransformSourceVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Source = source + r.req.Source = source.TransformSourceCaster() return r } -// Sync Defines the properties transforms require to run continuously. +// Defines the properties transforms require to run continuously. 
// API name: sync -func (r *UpdateTransform) Sync(sync *types.SyncContainer) *UpdateTransform { +func (r *UpdateTransform) Sync(sync types.SyncContainerVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Sync = sync + r.req.Sync = sync.SyncContainerCaster() return r } diff --git a/typedapi/transform/upgradetransforms/response.go b/typedapi/transform/upgradetransforms/response.go index c75cb672f5..fd544e43ae 100644 --- a/typedapi/transform/upgradetransforms/response.go +++ b/typedapi/transform/upgradetransforms/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package upgradetransforms // Response holds the response body struct for the package upgradetransforms // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/upgrade_transforms/UpgradeTransformsResponse.ts#L25-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/upgrade_transforms/UpgradeTransformsResponse.ts#L25-L34 type Response struct { // NeedsUpdate The number of transforms that need to be upgraded. diff --git a/typedapi/transform/upgradetransforms/upgrade_transforms.go b/typedapi/transform/upgradetransforms/upgrade_transforms.go index 8afc9b7bfe..1150fa31c5 100644 --- a/typedapi/transform/upgradetransforms/upgrade_transforms.go +++ b/typedapi/transform/upgradetransforms/upgrade_transforms.go @@ -16,18 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Upgrades all transforms. +// Upgrade all transforms. +// +// Transforms are compatible across minor versions and between supported major +// versions. +// However, over time, the format of transform configuration information may +// change. // This API identifies transforms that have a legacy configuration format and -// upgrades them to the latest version. It -// also cleans up the internal data structures that store the transform state -// and checkpoints. The upgrade does not -// affect the source and destination indices. The upgrade also does not affect -// the roles that transforms use when +// upgrades them to the latest version. +// It also cleans up the internal data structures that store the transform state +// and checkpoints. +// The upgrade does not affect the source and destination indices. +// The upgrade also does not affect the roles that transforms use when // Elasticsearch security features are enabled; the role used to read source -// data and write to the destination index -// remains unchanged. +// data and write to the destination index remains unchanged. +// +// If a transform upgrade step fails, the upgrade stops and an error is returned +// about the underlying issue. +// Resolve the issue then re-run the process again. +// A summary is returned when the upgrade is finished. +// +// To ensure continuous transforms remain running during a major version upgrade +// of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade +// transforms before upgrading the cluster. +// You may want to perform a recent cluster backup prior to the upgrade. package upgradetransforms import ( @@ -77,16 +91,30 @@ func NewUpgradeTransformsFunc(tp elastictransport.Interface) NewUpgradeTransform } } -// Upgrades all transforms. 
+// Upgrade all transforms. +// +// Transforms are compatible across minor versions and between supported major +// versions. +// However, over time, the format of transform configuration information may +// change. // This API identifies transforms that have a legacy configuration format and -// upgrades them to the latest version. It -// also cleans up the internal data structures that store the transform state -// and checkpoints. The upgrade does not -// affect the source and destination indices. The upgrade also does not affect -// the roles that transforms use when +// upgrades them to the latest version. +// It also cleans up the internal data structures that store the transform state +// and checkpoints. +// The upgrade does not affect the source and destination indices. +// The upgrade also does not affect the roles that transforms use when // Elasticsearch security features are enabled; the role used to read source -// data and write to the destination index -// remains unchanged. +// data and write to the destination index remains unchanged. +// +// If a transform upgrade step fails, the upgrade stops and an error is returned +// about the underlying issue. +// Resolve the issue then re-run the process again. +// A summary is returned when the upgrade is finished. +// +// To ensure continuous transforms remain running during a major version upgrade +// of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade +// transforms before upgrading the cluster. +// You may want to perform a recent cluster backup prior to the upgrade. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/upgrade-transforms.html func New(tp elastictransport.Interface) *UpgradeTransforms { diff --git a/typedapi/types/access.go b/typedapi/types/access.go new file mode 100644 index 0000000000..d7eb7696e2 --- /dev/null +++ b/typedapi/types/access.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +// Access type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Access.ts#L22-L31 +type Access struct { + // Replication A list of indices permission entries for cross-cluster replication. + Replication []ReplicationAccess `json:"replication,omitempty"` + // Search A list of indices permission entries for cross-cluster search. + Search []SearchAccess `json:"search,omitempty"` +} + +// NewAccess returns a Access. +func NewAccess() *Access { + r := &Access{} + + return r +} + +// true + +type AccessVariant interface { + AccessCaster() *Access +} + +func (s *Access) AccessCaster() *Access { + return s +} diff --git a/typedapi/types/acknowledgement.go b/typedapi/types/acknowledgement.go index 348a241b72..f6a2a0058d 100644 --- a/typedapi/types/acknowledgement.go +++ b/typedapi/types/acknowledgement.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Acknowledgement type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/license/post/types.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/license/post/types.ts#L20-L23 type Acknowledgement struct { License []string `json:"license"` Message string `json:"message"` @@ -80,3 +80,5 @@ func NewAcknowledgement() *Acknowledgement { return r } + +// false diff --git a/typedapi/types/acknowledgestate.go b/typedapi/types/acknowledgestate.go index 57167ace07..1660b6a784 100644 --- a/typedapi/types/acknowledgestate.go +++ b/typedapi/types/acknowledgestate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // AcknowledgeState type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Action.ts#L115-L118 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Action.ts#L109-L112 type AcknowledgeState struct { State acknowledgementoptions.AcknowledgementOptions `json:"state"` Timestamp DateTime `json:"timestamp"` @@ -74,3 +74,13 @@ func NewAcknowledgeState() *AcknowledgeState { return r } + +// true + +type AcknowledgeStateVariant interface { + AcknowledgeStateCaster() *AcknowledgeState +} + +func (s *AcknowledgeState) AcknowledgeStateCaster() *AcknowledgeState { + return s +} diff --git a/typedapi/types/actionstatus.go b/typedapi/types/actionstatus.go index 325e67123b..8110899e9d 100644 --- a/typedapi/types/actionstatus.go +++ b/typedapi/types/actionstatus.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ActionStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Action.ts#L131-L136 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Action.ts#L125-L130 type ActionStatus struct { Ack AcknowledgeState `json:"ack"` LastExecution *ExecutionState `json:"last_execution,omitempty"` @@ -36,3 +36,13 @@ func NewActionStatus() *ActionStatus { return r } + +// true + +type ActionStatusVariant interface { + ActionStatusCaster() *ActionStatus +} + +func (s *ActionStatus) ActionStatusCaster() *ActionStatus { + return s +} diff --git a/typedapi/types/activationstate.go b/typedapi/types/activationstate.go index 48d5737fbb..9ebcbbf4f9 100644 --- a/typedapi/types/activationstate.go +++ b/typedapi/types/activationstate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ActivationState type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Activation.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Activation.ts#L24-L27 type ActivationState struct { Active bool `json:"active"` Timestamp DateTime `json:"timestamp"` @@ -82,3 +82,13 @@ func NewActivationState() *ActivationState { return r } + +// true + +type ActivationStateVariant interface { + ActivationStateCaster() *ActivationState +} + +func (s *ActivationState) ActivationStateCaster() *ActivationState { + return s +} diff --git a/typedapi/types/activationstatus.go b/typedapi/types/activationstatus.go index f91d2c1d83..e1889f6ac7 100644 --- a/typedapi/types/activationstatus.go +++ b/typedapi/types/activationstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ActivationStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Activation.ts#L29-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Activation.ts#L29-L33 type ActivationStatus struct { Actions WatcherStatusActions `json:"actions"` State ActivationState `json:"state"` @@ -78,3 +78,5 @@ func NewActivationStatus() *ActivationStatus { return r } + +// false diff --git a/typedapi/types/adaptiveallocationssettings.go b/typedapi/types/adaptiveallocationssettings.go new file mode 100644 index 0000000000..891676c395 --- /dev/null +++ b/typedapi/types/adaptiveallocationssettings.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AdaptiveAllocationsSettings type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L109-L125 +type AdaptiveAllocationsSettings struct { + // Enabled If true, adaptive_allocations is enabled + Enabled bool `json:"enabled"` + // MaxNumberOfAllocations Specifies the maximum number of allocations to scale to. + // If set, it must be greater than or equal to min_number_of_allocations. + MaxNumberOfAllocations *int `json:"max_number_of_allocations,omitempty"` + // MinNumberOfAllocations Specifies the minimum number of allocations to scale to. + // If set, it must be greater than or equal to 0. + // If not defined, the deployment scales to 0. + MinNumberOfAllocations *int `json:"min_number_of_allocations,omitempty"` +} + +func (s *AdaptiveAllocationsSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "max_number_of_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNumberOfAllocations", err) + } + s.MaxNumberOfAllocations = &value + case float64: + f := int(v) + s.MaxNumberOfAllocations = &f + } + + case "min_number_of_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinNumberOfAllocations", err) + } + s.MinNumberOfAllocations = &value + case float64: + f := int(v) + s.MinNumberOfAllocations = &f + } + + } + } + return nil +} + +// 
NewAdaptiveAllocationsSettings returns a AdaptiveAllocationsSettings. +func NewAdaptiveAllocationsSettings() *AdaptiveAllocationsSettings { + r := &AdaptiveAllocationsSettings{} + + return r +} + +// true + +type AdaptiveAllocationsSettingsVariant interface { + AdaptiveAllocationsSettingsCaster() *AdaptiveAllocationsSettings +} + +func (s *AdaptiveAllocationsSettings) AdaptiveAllocationsSettingsCaster() *AdaptiveAllocationsSettings { + return s +} diff --git a/typedapi/types/adaptiveselection.go b/typedapi/types/adaptiveselection.go index 77c9cfac9c..e20bfcc599 100644 --- a/typedapi/types/adaptiveselection.go +++ b/typedapi/types/adaptiveselection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AdaptiveSelection type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L403-L432 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L439-L468 type AdaptiveSelection struct { // AvgQueueSize The exponentially weighted moving average queue size of search requests on // the keyed node. @@ -163,3 +163,5 @@ func NewAdaptiveSelection() *AdaptiveSelection { return r } + +// false diff --git a/typedapi/types/addaction.go b/typedapi/types/addaction.go index 494d9ad478..3a7f82d2c2 100644 --- a/typedapi/types/addaction.go +++ b/typedapi/types/addaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AddAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/update_aliases/types.ts#L41-L95 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/update_aliases/types.ts#L41-L95 type AddAction struct { // Alias Alias for the action. // Index alias names support date math. @@ -196,3 +196,13 @@ func NewAddAction() *AddAction { return r } + +// true + +type AddActionVariant interface { + AddActionCaster() *AddAction +} + +func (s *AddAction) AddActionCaster() *AddAction { + return s +} diff --git a/typedapi/types/adjacencymatrixaggregate.go b/typedapi/types/adjacencymatrixaggregate.go index d15059f921..7302a03da8 100644 --- a/typedapi/types/adjacencymatrixaggregate.go +++ b/typedapi/types/adjacencymatrixaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // AdjacencyMatrixAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L577-L579 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L647-L652 type AdjacencyMatrixAggregate struct { Buckets BucketsAdjacencyMatrixBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewAdjacencyMatrixAggregate() *AdjacencyMatrixAggregate { return r } + +// false diff --git a/typedapi/types/adjacencymatrixaggregation.go b/typedapi/types/adjacencymatrixaggregation.go index 9f0783d54a..c46347a0de 100644 --- a/typedapi/types/adjacencymatrixaggregation.go +++ b/typedapi/types/adjacencymatrixaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AdjacencyMatrixAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L55-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L60-L70 type AdjacencyMatrixAggregation struct { // Filters Filters used to create buckets. // At least one filter is required. @@ -83,8 +83,18 @@ func (s *AdjacencyMatrixAggregation) UnmarshalJSON(data []byte) error { // NewAdjacencyMatrixAggregation returns a AdjacencyMatrixAggregation. 
func NewAdjacencyMatrixAggregation() *AdjacencyMatrixAggregation { r := &AdjacencyMatrixAggregation{ - Filters: make(map[string]Query, 0), + Filters: make(map[string]Query), } return r } + +// true + +type AdjacencyMatrixAggregationVariant interface { + AdjacencyMatrixAggregationCaster() *AdjacencyMatrixAggregation +} + +func (s *AdjacencyMatrixAggregation) AdjacencyMatrixAggregationCaster() *AdjacencyMatrixAggregation { + return s +} diff --git a/typedapi/types/adjacencymatrixbucket.go b/typedapi/types/adjacencymatrixbucket.go index e14d9c9280..7a26f0f767 100644 --- a/typedapi/types/adjacencymatrixbucket.go +++ b/typedapi/types/adjacencymatrixbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // AdjacencyMatrixBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L581-L583 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L654-L656 type AdjacencyMatrixBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -498,6 +498,13 @@ func (s *AdjacencyMatrixBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -631,8 +638,10 @@ func (s AdjacencyMatrixBucket) MarshalJSON() ([]byte, error) { // NewAdjacencyMatrixBucket returns a AdjacencyMatrixBucket. func NewAdjacencyMatrixBucket() *AdjacencyMatrixBucket { r := &AdjacencyMatrixBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/aggregate.go b/typedapi/types/aggregate.go index 5319872c58..f510539a45 100644 --- a/typedapi/types/aggregate.go +++ b/typedapi/types/aggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -80,6 +80,7 @@ package types // UnmappedSignificantTermsAggregate // CompositeAggregate // FrequentItemSetsAggregate +// TimeSeriesAggregate // ScriptedMetricAggregate // TopHitsAggregate // InferenceAggregate @@ -92,5 +93,5 @@ package types // MatrixStatsAggregate // GeoLineAggregate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L38-L123 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L38-L125 type Aggregate any diff --git a/typedapi/types/aggregatemetricdoubleproperty.go b/typedapi/types/aggregatemetricdoubleproperty.go index 8ef2eed5d4..8c3e0f380b 100644 --- a/typedapi/types/aggregatemetricdoubleproperty.go +++ b/typedapi/types/aggregatemetricdoubleproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,23 +29,25 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // AggregateMetricDoubleProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/complex.ts#L61-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/complex.ts#L59-L64 type AggregateMetricDoubleProperty struct { DefaultMetric string `json:"default_metric"` Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Metrics []string `json:"metrics"` - Properties map[string]Property `json:"properties,omitempty"` - TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Metrics []string `json:"metrics"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type,omitempty"` } func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { @@ -99,301 +101,313 @@ func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := 
NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": 
oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -447,306 +461,323 @@ func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo 
:= NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil 
{ - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = 
oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_metric": if err := dec.Decode(&s.TimeSeriesMetric); err != nil { return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) @@ -766,15 +797,16 @@ func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { func (s AggregateMetricDoubleProperty) MarshalJSON() ([]byte, error) { type innerAggregateMetricDoubleProperty AggregateMetricDoubleProperty tmp := innerAggregateMetricDoubleProperty{ - 
DefaultMetric: s.DefaultMetric, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Metrics: s.Metrics, - Properties: s.Properties, - TimeSeriesMetric: s.TimeSeriesMetric, - Type: s.Type, + DefaultMetric: s.DefaultMetric, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Metrics: s.Metrics, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, } tmp.Type = "aggregate_metric_double" @@ -785,10 +817,20 @@ func (s AggregateMetricDoubleProperty) MarshalJSON() ([]byte, error) { // NewAggregateMetricDoubleProperty returns a AggregateMetricDoubleProperty. func NewAggregateMetricDoubleProperty() *AggregateMetricDoubleProperty { r := &AggregateMetricDoubleProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type AggregateMetricDoublePropertyVariant interface { + AggregateMetricDoublePropertyCaster() *AggregateMetricDoubleProperty +} + +func (s *AggregateMetricDoubleProperty) AggregateMetricDoublePropertyCaster() *AggregateMetricDoubleProperty { + return s +} diff --git a/typedapi/types/aggregateorder.go b/typedapi/types/aggregateorder.go index 99910263d5..5a53c8c6c8 100644 --- a/typedapi/types/aggregateorder.go +++ b/typedapi/types/aggregateorder.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // map[string]sortorder.SortOrder // []map[string]sortorder.SortOrder // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L983-L985 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1052-L1054 type AggregateOrder any + +type AggregateOrderVariant interface { + AggregateOrderCaster() *AggregateOrder +} diff --git a/typedapi/types/aggregateoutput.go b/typedapi/types/aggregateoutput.go index df20e617d9..4905ff25cf 100644 --- a/typedapi/types/aggregateoutput.go +++ b/typedapi/types/aggregateoutput.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // AggregateOutput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/types.ts#L101-L106 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/types.ts#L101-L106 type AggregateOutput struct { Exponent *Weights `json:"exponent,omitempty"` LogisticRegression *Weights `json:"logistic_regression,omitempty"` @@ -36,3 +36,13 @@ func NewAggregateOutput() *AggregateOutput { return r } + +// true + +type AggregateOutputVariant interface { + AggregateOutputCaster() *AggregateOutput +} + +func (s *AggregateOutput) AggregateOutputCaster() *AggregateOutput { + return s +} diff --git a/typedapi/types/aggregationbreakdown.go b/typedapi/types/aggregationbreakdown.go index aaac9bb64f..b4da1e0ae8 100644 --- a/typedapi/types/aggregationbreakdown.go +++ b/typedapi/types/aggregationbreakdown.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AggregationBreakdown type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/profile.ts#L23-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L26-L39 type AggregationBreakdown struct { BuildAggregation int64 `json:"build_aggregation"` BuildAggregationCount int64 `json:"build_aggregation_count"` @@ -253,3 +253,5 @@ func NewAggregationBreakdown() *AggregationBreakdown { return r } + +// false diff --git a/typedapi/types/aggregationprofile.go b/typedapi/types/aggregationprofile.go index a92c7c5a38..0257ed42e5 100644 --- a/typedapi/types/aggregationprofile.go +++ b/typedapi/types/aggregationprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AggregationProfile type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/profile.ts#L77-L84 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L85-L92 type AggregationProfile struct { Breakdown AggregationBreakdown `json:"breakdown"` Children []AggregationProfile `json:"children,omitempty"` @@ -111,3 +111,5 @@ func NewAggregationProfile() *AggregationProfile { return r } + +// false diff --git a/typedapi/types/aggregationprofiledebug.go b/typedapi/types/aggregationprofiledebug.go index e4df5ea2f5..887b74a3bc 100644 --- a/typedapi/types/aggregationprofiledebug.go +++ b/typedapi/types/aggregationprofiledebug.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,8 +31,9 @@ import ( // AggregationProfileDebug type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/profile.ts#L39-L68 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L42-L76 type AggregationProfileDebug struct { + BruteForceUsed *int `json:"brute_force_used,omitempty"` BuiltBuckets *int `json:"built_buckets,omitempty"` CharsFetched *int `json:"chars_fetched,omitempty"` CollectAnalyzedCount *int `json:"collect_analyzed_count,omitempty"` @@ -41,6 +42,8 @@ type AggregationProfileDebug struct { DeferredAggregators []string `json:"deferred_aggregators,omitempty"` Delegate *string `json:"delegate,omitempty"` DelegateDebug *AggregationProfileDebug `json:"delegate_debug,omitempty"` + DynamicPruningAttempted *int `json:"dynamic_pruning_attempted,omitempty"` + DynamicPruningUsed *int `json:"dynamic_pruning_used,omitempty"` EmptyCollectorsUsed *int `json:"empty_collectors_used,omitempty"` ExtractCount *int `json:"extract_count,omitempty"` ExtractNs *int `json:"extract_ns,omitempty"` @@ -57,6 +60,7 @@ type AggregationProfileDebug struct { SegmentsWithDocCountField *int `json:"segments_with_doc_count_field,omitempty"` SegmentsWithMultiValuedOrds *int `json:"segments_with_multi_valued_ords,omitempty"` SegmentsWithSingleValuedOrds *int `json:"segments_with_single_valued_ords,omitempty"` + SkippedDueToNoData *int `json:"skipped_due_to_no_data,omitempty"` StringHashingCollectorsUsed *int `json:"string_hashing_collectors_used,omitempty"` SurvivingBuckets *int 
`json:"surviving_buckets,omitempty"` TotalBuckets *int `json:"total_buckets,omitempty"` @@ -78,6 +82,22 @@ func (s *AggregationProfileDebug) UnmarshalJSON(data []byte) error { switch t { + case "brute_force_used": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "BruteForceUsed", err) + } + s.BruteForceUsed = &value + case float64: + f := int(v) + s.BruteForceUsed = &f + } + case "built_buckets": var tmp any @@ -176,6 +196,38 @@ func (s *AggregationProfileDebug) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "DelegateDebug", err) } + case "dynamic_pruning_attempted": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DynamicPruningAttempted", err) + } + s.DynamicPruningAttempted = &value + case float64: + f := int(v) + s.DynamicPruningAttempted = &f + } + + case "dynamic_pruning_used": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DynamicPruningUsed", err) + } + s.DynamicPruningUsed = &value + case float64: + f := int(v) + s.DynamicPruningUsed = &f + } + case "empty_collectors_used": var tmp any @@ -411,6 +463,22 @@ func (s *AggregationProfileDebug) UnmarshalJSON(data []byte) error { s.SegmentsWithSingleValuedOrds = &f } + case "skipped_due_to_no_data": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SkippedDueToNoData", err) + } + s.SkippedDueToNoData = &value + case float64: + f := int(v) + s.SkippedDueToNoData = &f + } + case "string_hashing_collectors_used": var tmp any @@ -486,3 +554,5 @@ func NewAggregationProfileDebug() *AggregationProfileDebug { return r } + +// false diff --git 
a/typedapi/types/aggregationprofiledelegatedebugfilter.go b/typedapi/types/aggregationprofiledelegatedebugfilter.go index 7a45114e99..5d8083c7a4 100644 --- a/typedapi/types/aggregationprofiledelegatedebugfilter.go +++ b/typedapi/types/aggregationprofiledelegatedebugfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AggregationProfileDelegateDebugFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/profile.ts#L70-L75 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L78-L83 type AggregationProfileDelegateDebugFilter struct { Query *string `json:"query,omitempty"` ResultsFromMetadata *int `json:"results_from_metadata,omitempty"` @@ -121,3 +121,5 @@ func NewAggregationProfileDelegateDebugFilter() *AggregationProfileDelegateDebug return r } + +// false diff --git a/typedapi/types/aggregationrange.go b/typedapi/types/aggregationrange.go index 9030ce7843..a0ad530d9e 100644 --- a/typedapi/types/aggregationrange.go +++ b/typedapi/types/aggregationrange.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AggregationRange type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L674-L687 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L691-L704 type AggregationRange struct { // From Start of the range (inclusive). From *Float64 `json:"from,omitempty"` @@ -57,19 +57,8 @@ func (s *AggregationRange) UnmarshalJSON(data []byte) error { switch t { case "from": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "From", err) - } - f := Float64(value) - s.From = &f - case float64: - f := Float64(v) - s.From = &f + if err := dec.Decode(&s.From); err != nil { + return fmt.Errorf("%s | %w", "From", err) } case "key": @@ -85,19 +74,8 @@ func (s *AggregationRange) UnmarshalJSON(data []byte) error { s.Key = &o case "to": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "To", err) - } - f := Float64(value) - s.To = &f - case float64: - f := Float64(v) - s.To = &f + if err := dec.Decode(&s.To); err != nil { + return fmt.Errorf("%s | %w", "To", err) } } @@ -111,3 +89,13 @@ func NewAggregationRange() *AggregationRange { return r } + +// true + +type AggregationRangeVariant interface { + AggregationRangeCaster() *AggregationRange +} + +func (s *AggregationRange) AggregationRangeCaster() *AggregationRange { + return s +} diff --git a/typedapi/types/aggregations.go b/typedapi/types/aggregations.go index 1bdf438d5b..a54656afb3 100644 --- a/typedapi/types/aggregations.go +++ b/typedapi/types/aggregations.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,8 +30,9 @@ import ( // Aggregations type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/AggregationContainer.ts#L105-L514 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/AggregationContainer.ts#L107-L533 type Aggregations struct { + AdditionalAggregationsProperty map[string]json.RawMessage `json:"-"` // AdjacencyMatrix A bucket aggregation returning a form of adjacency matrix. // The request provides a collection of named filter expressions, similar to the // `filters` aggregation. @@ -223,6 +224,10 @@ type Aggregations struct { // PercentilesBucket A sibling pipeline aggregation which calculates percentiles across all bucket // of a specified metric in a sibling aggregation. PercentilesBucket *PercentilesBucketAggregation `json:"percentiles_bucket,omitempty"` + // RandomSampler A single bucket aggregation that randomly includes documents in the + // aggregated results. + // Sampling provides significant speed improvement at the cost of accuracy. + RandomSampler *RandomSamplerAggregation `json:"random_sampler,omitempty"` // Range A multi-bucket value source based aggregation that enables the user to define // a set of ranges - each representing a bucket. Range *RangeAggregation `json:"range,omitempty"` @@ -270,6 +275,10 @@ type Aggregations struct { // Terms A multi-bucket value source based aggregation where buckets are dynamically // built - one per unique value. Terms *TermsAggregation `json:"terms,omitempty"` + // TimeSeries The time series aggregation queries data created using a time series index. 
+ // This is typically data such as metrics or other data streams with a time + // component, and requires creating an index using the time series mode. + TimeSeries *TimeSeriesAggregation `json:"time_series,omitempty"` // TopHits A metric aggregation that returns the top matching documents per bucket. TopHits *TopHitsAggregation `json:"top_hits,omitempty"` // TopMetrics A metric aggregation that selects metrics from the document with the largest @@ -555,36 +564,36 @@ func (s *Aggregations) UnmarshalJSON(data []byte) error { case "linear": o := NewLinearMovingAverageAggregation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "linear", err) } s.MovingAvg = *o case "simple": o := NewSimpleMovingAverageAggregation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "simple", err) } s.MovingAvg = *o case "ewma": o := NewEwmaMovingAverageAggregation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ewma", err) } s.MovingAvg = *o case "holt": o := NewHoltMovingAverageAggregation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "holt", err) } s.MovingAvg = *o case "holt_winters": o := NewHoltWintersMovingAverageAggregation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "holt_winters", err) } s.MovingAvg = *o default: if err := localDec.Decode(&s.MovingAvg); err != nil { - return err + return fmt.Errorf("MovingAvg | %w", err) } } @@ -633,6 +642,11 @@ func (s *Aggregations) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "PercentilesBucket", err) } + case "random_sampler": + if err := dec.Decode(&s.RandomSampler); err != nil { + return fmt.Errorf("%s | %w", "RandomSampler", err) + } + case "range": if err := dec.Decode(&s.Range); err != nil { return fmt.Errorf("%s | %w", "Range", err) @@ -713,6 +727,11 @@ func (s *Aggregations) UnmarshalJSON(data []byte) 
error { return fmt.Errorf("%s | %w", "Terms", err) } + case "time_series": + if err := dec.Decode(&s.TimeSeries); err != nil { + return fmt.Errorf("%s | %w", "TimeSeries", err) + } + case "top_hits": if err := dec.Decode(&s.TopHits); err != nil { return fmt.Errorf("%s | %w", "TopHits", err) @@ -738,16 +757,69 @@ func (s *Aggregations) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "WeightedAvg", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalAggregationsProperty == nil { + s.AdditionalAggregationsProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalAggregationsProperty", err) + } + s.AdditionalAggregationsProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s Aggregations) MarshalJSON() ([]byte, error) { + type opt Aggregations + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalAggregationsProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalAggregationsProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewAggregations returns a Aggregations. 
func NewAggregations() *Aggregations { r := &Aggregations{ - Aggregations: make(map[string]Aggregations, 0), + AdditionalAggregationsProperty: make(map[string]json.RawMessage), + Aggregations: make(map[string]Aggregations), } return r } + +// true + +type AggregationsVariant interface { + AggregationsCaster() *Aggregations +} + +func (s *Aggregations) AggregationsCaster() *Aggregations { + return s +} diff --git a/typedapi/types/alias.go b/typedapi/types/alias.go index ac083b2217..1d732d97e6 100644 --- a/typedapi/types/alias.go +++ b/typedapi/types/alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Alias type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/Alias.ts#L23-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/Alias.ts#L23-L53 type Alias struct { // Filter Query used to limit documents the alias can access. Filter *Query `json:"filter,omitempty"` @@ -124,3 +124,13 @@ func NewAlias() *Alias { return r } + +// true + +type AliasVariant interface { + AliasCaster() *Alias +} + +func (s *Alias) AliasCaster() *Alias { + return s +} diff --git a/typedapi/types/aliasdefinition.go b/typedapi/types/aliasdefinition.go index 0c257764af..a152d81bb3 100644 --- a/typedapi/types/aliasdefinition.go +++ b/typedapi/types/aliasdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AliasDefinition type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/AliasDefinition.ts#L22-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/AliasDefinition.ts#L22-L54 type AliasDefinition struct { // Filter Query used to limit documents the alias can access. Filter *Query `json:"filter,omitempty"` @@ -145,3 +145,13 @@ func NewAliasDefinition() *AliasDefinition { return r } + +// true + +type AliasDefinitionVariant interface { + AliasDefinitionCaster() *AliasDefinition +} + +func (s *AliasDefinition) AliasDefinitionCaster() *AliasDefinition { + return s +} diff --git a/typedapi/types/aliasesrecord.go b/typedapi/types/aliasesrecord.go index 5b416e6e02..c3ef70ae13 100644 --- a/typedapi/types/aliasesrecord.go +++ b/typedapi/types/aliasesrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AliasesRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/aliases/types.ts#L22-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/aliases/types.ts#L22-L53 type AliasesRecord struct { // Alias alias name Alias *string `json:"alias,omitempty"` @@ -138,3 +138,5 @@ func NewAliasesRecord() *AliasesRecord { return r } + +// false diff --git a/typedapi/types/allfield.go b/typedapi/types/allfield.go index 7e2ae6585b..5a2525c13f 100644 --- a/typedapi/types/allfield.go +++ b/typedapi/types/allfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AllField type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/meta-fields.ts#L29-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/meta-fields.ts#L29-L40 type AllField struct { Analyzer string `json:"analyzer"` Enabled bool `json:"enabled"` @@ -205,3 +205,13 @@ func NewAllField() *AllField { return r } + +// true + +type AllFieldVariant interface { + AllFieldCaster() *AllField +} + +func (s *AllField) AllFieldCaster() *AllField { + return s +} diff --git a/typedapi/types/allocateaction.go b/typedapi/types/allocateaction.go index 33fd58ace7..9f3cb34f24 100644 --- a/typedapi/types/allocateaction.go +++ b/typedapi/types/allocateaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AllocateAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Phase.ts#L136-L142 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Phase.ts#L133-L139 type AllocateAction struct { Exclude map[string]string `json:"exclude,omitempty"` Include map[string]string `json:"include,omitempty"` @@ -119,10 +119,20 @@ func (s *AllocateAction) UnmarshalJSON(data []byte) error { // NewAllocateAction returns a AllocateAction. func NewAllocateAction() *AllocateAction { r := &AllocateAction{ - Exclude: make(map[string]string, 0), - Include: make(map[string]string, 0), - Require: make(map[string]string, 0), + Exclude: make(map[string]string), + Include: make(map[string]string), + Require: make(map[string]string), } return r } + +// true + +type AllocateActionVariant interface { + AllocateActionCaster() *AllocateAction +} + +func (s *AllocateAction) AllocateActionCaster() *AllocateAction { + return s +} diff --git a/typedapi/types/allocationdecision.go b/typedapi/types/allocationdecision.go index 262049a387..4b432907fc 100644 --- a/typedapi/types/allocationdecision.go +++ b/typedapi/types/allocationdecision.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // AllocationDecision type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/types.ts#L26-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/types.ts#L27-L31 type AllocationDecision struct { Decider string `json:"decider"` Decision allocationexplaindecision.AllocationExplainDecision `json:"decision"` @@ -95,3 +95,5 @@ func NewAllocationDecision() *AllocationDecision { return r } + +// false diff --git a/typedapi/types/allocationrecord.go b/typedapi/types/allocationrecord.go index a7c71eaa5f..c9b80d98a7 100644 --- a/typedapi/types/allocationrecord.go +++ b/typedapi/types/allocationrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,25 +31,25 @@ import ( // AllocationRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/allocation/types.ts#L24-L98 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/allocation/types.ts#L25-L99 type AllocationRecord struct { // DiskAvail Free disk space available to Elasticsearch. // Elasticsearch retrieves this metric from the node’s operating system. // Disk-based shard allocation uses this metric to assign shards to nodes based // on available disk space. - DiskAvail *ByteSize `json:"disk.avail,omitempty"` + DiskAvail ByteSize `json:"disk.avail,omitempty"` // DiskIndices Disk space used by the node’s shards. Does not include disk space for the // translog or unassigned shards. 
// IMPORTANT: This metric double-counts disk space for hard-linked files, such // as those created when shrinking, splitting, or cloning an index. - DiskIndices *ByteSize `json:"disk.indices,omitempty"` + DiskIndices ByteSize `json:"disk.indices,omitempty"` // DiskIndicesForecast Sum of shard size forecasts - DiskIndicesForecast *ByteSize `json:"disk.indices.forecast,omitempty"` + DiskIndicesForecast ByteSize `json:"disk.indices.forecast,omitempty"` // DiskPercent Total percentage of disk space in use. Calculated as `disk.used / // disk.total`. - DiskPercent *Percentage `json:"disk.percent,omitempty"` + DiskPercent Percentage `json:"disk.percent,omitempty"` // DiskTotal Total disk space for the node, including in-use and available space. - DiskTotal *ByteSize `json:"disk.total,omitempty"` + DiskTotal ByteSize `json:"disk.total,omitempty"` // DiskUsed Total disk space in use. // Elasticsearch retrieves this metric from the node’s operating system (OS). // The metric includes disk space for: Elasticsearch, including the translog and @@ -57,22 +57,22 @@ type AllocationRecord struct { // files on the node. // Unlike `disk.indices`, this metric does not double-count disk space for // hard-linked files. - DiskUsed *ByteSize `json:"disk.used,omitempty"` + DiskUsed ByteSize `json:"disk.used,omitempty"` // Host Network host for the node. Set using the `network.host` setting. Host *string `json:"host,omitempty"` // Ip IP address and port for the node. Ip *string `json:"ip,omitempty"` // Node Name for the node. Set using the `node.name` setting. - Node string `json:"node"` + Node *string `json:"node,omitempty"` // NodeRole Node roles NodeRole *string `json:"node.role,omitempty"` // Shards Number of primary and replica shards assigned to the node. 
- Shards string `json:"shards"` + Shards *string `json:"shards,omitempty"` // ShardsUndesired Amount of shards that are scheduled to be moved elsewhere in the cluster or // -1 other than desired balance allocator is used ShardsUndesired *string `json:"shards.undesired,omitempty"` // WriteLoadForecast Sum of index write load forecasts - WriteLoadForecast *Float64 `json:"write_load.forecast,omitempty"` + WriteLoadForecast Stringifieddouble `json:"write_load.forecast,omitempty"` } func (s *AllocationRecord) UnmarshalJSON(data []byte) error { @@ -140,7 +140,7 @@ func (s *AllocationRecord) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Node = o + s.Node = &o case "node.role", "r", "role", "nodeRole": var tmp json.RawMessage @@ -164,7 +164,7 @@ func (s *AllocationRecord) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Shards = o + s.Shards = &o case "shards.undesired": var tmp json.RawMessage @@ -194,3 +194,5 @@ func NewAllocationRecord() *AllocationRecord { return r } + +// false diff --git a/typedapi/types/allocationstore.go b/typedapi/types/allocationstore.go index f3b6e25476..1aa7253e33 100644 --- a/typedapi/types/allocationstore.go +++ b/typedapi/types/allocationstore.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AllocationStore type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/types.ts#L39-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/types.ts#L40-L47 type AllocationStore struct { AllocationId string `json:"allocation_id"` Found bool `json:"found"` @@ -148,3 +148,5 @@ func NewAllocationStore() *AllocationStore { return r } + +// false diff --git a/typedapi/types/alwayscondition.go b/typedapi/types/alwayscondition.go index 26e02caf9c..5be81eb9fe 100644 --- a/typedapi/types/alwayscondition.go +++ b/typedapi/types/alwayscondition.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // AlwaysCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Conditions.ts#L25-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Conditions.ts#L25-L25 type AlwaysCondition struct { } @@ -32,3 +32,13 @@ func NewAlwaysCondition() *AlwaysCondition { return r } + +// true + +type AlwaysConditionVariant interface { + AlwaysConditionCaster() *AlwaysCondition +} + +func (s *AlwaysCondition) AlwaysConditionCaster() *AlwaysCondition { + return s +} diff --git a/typedapi/types/analysisconfig.go b/typedapi/types/analysisconfig.go index 2474a8e441..36a4b6aa54 100644 --- a/typedapi/types/analysisconfig.go +++ b/typedapi/types/analysisconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AnalysisConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Analysis.ts#L29-L77 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Analysis.ts#L29-L77 type AnalysisConfig struct { // BucketSpan The size of the interval that the analysis is aggregated into, typically // between `5m` and `1h`. This value should be either a whole number of days or @@ -234,3 +234,13 @@ func NewAnalysisConfig() *AnalysisConfig { return r } + +// true + +type AnalysisConfigVariant interface { + AnalysisConfigCaster() *AnalysisConfig +} + +func (s *AnalysisConfig) AnalysisConfigCaster() *AnalysisConfig { + return s +} diff --git a/typedapi/types/analysisconfigread.go b/typedapi/types/analysisconfigread.go index 4c6b68ee7e..f852448e30 100644 --- a/typedapi/types/analysisconfigread.go +++ b/typedapi/types/analysisconfigread.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AnalysisConfigRead type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Analysis.ts#L79-L148 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Analysis.ts#L79-L148 type AnalysisConfigRead struct { // BucketSpan The size of the interval that the analysis is aggregated into, typically // between `5m` and `1h`. @@ -221,3 +221,5 @@ func NewAnalysisConfigRead() *AnalysisConfigRead { return r } + +// false diff --git a/typedapi/types/analysislimits.go b/typedapi/types/analysislimits.go index fc684735ac..d1d41a95aa 100644 --- a/typedapi/types/analysislimits.go +++ b/typedapi/types/analysislimits.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AnalysisLimits type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Analysis.ts#L161-L172 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Analysis.ts#L161-L172 type AnalysisLimits struct { // CategorizationExamplesLimit The maximum number of examples stored per category in memory and in the // results data store. If you increase this value, more examples are available, @@ -55,7 +55,7 @@ type AnalysisLimits struct { // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to // create jobs that have `model_memory_limit` values greater than that setting // value. 
- ModelMemoryLimit *string `json:"model_memory_limit,omitempty"` + ModelMemoryLimit ByteSize `json:"model_memory_limit,omitempty"` } func (s *AnalysisLimits) UnmarshalJSON(data []byte) error { @@ -89,16 +89,9 @@ func (s *AnalysisLimits) UnmarshalJSON(data []byte) error { } case "model_memory_limit": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.ModelMemoryLimit); err != nil { return fmt.Errorf("%s | %w", "ModelMemoryLimit", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.ModelMemoryLimit = &o } } @@ -111,3 +104,13 @@ func NewAnalysisLimits() *AnalysisLimits { return r } + +// true + +type AnalysisLimitsVariant interface { + AnalysisLimitsCaster() *AnalysisLimits +} + +func (s *AnalysisLimits) AnalysisLimitsCaster() *AnalysisLimits { + return s +} diff --git a/typedapi/types/analysismemorylimit.go b/typedapi/types/analysismemorylimit.go index 393206c30d..c7e8158594 100644 --- a/typedapi/types/analysismemorylimit.go +++ b/typedapi/types/analysismemorylimit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AnalysisMemoryLimit type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Analysis.ts#L174-L179 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Analysis.ts#L174-L179 type AnalysisMemoryLimit struct { // ModelMemoryLimit Limits can be applied for the resources required to hold the mathematical // models in memory. These limits are approximate and can be set per job. 
They @@ -78,3 +78,13 @@ func NewAnalysisMemoryLimit() *AnalysisMemoryLimit { return r } + +// true + +type AnalysisMemoryLimitVariant interface { + AnalysisMemoryLimitCaster() *AnalysisMemoryLimit +} + +func (s *AnalysisMemoryLimit) AnalysisMemoryLimitCaster() *AnalysisMemoryLimit { + return s +} diff --git a/typedapi/types/analytics.go b/typedapi/types/analytics.go index 995f20cd16..4b85838706 100644 --- a/typedapi/types/analytics.go +++ b/typedapi/types/analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Analytics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L330-L332 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L340-L342 type Analytics struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -97,3 +97,5 @@ func NewAnalytics() *Analytics { return r } + +// false diff --git a/typedapi/types/analyticscollection.go b/typedapi/types/analyticscollection.go index f9edab92d1..ee571a07e8 100644 --- a/typedapi/types/analyticscollection.go +++ b/typedapi/types/analyticscollection.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // AnalyticsCollection type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/_types/BehavioralAnalytics.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/_types/BehavioralAnalytics.ts#L22-L27 type AnalyticsCollection struct { // EventDataStream Data stream for the collection. EventDataStream EventDataStream `json:"event_data_stream"` @@ -34,3 +34,5 @@ func NewAnalyticsCollection() *AnalyticsCollection { return r } + +// false diff --git a/typedapi/types/analyticsstatistics.go b/typedapi/types/analyticsstatistics.go index 37a3edb94b..c30ace6f64 100644 --- a/typedapi/types/analyticsstatistics.go +++ b/typedapi/types/analyticsstatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AnalyticsStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L61-L71 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L58-L68 type AnalyticsStatistics struct { BoxplotUsage int64 `json:"boxplot_usage"` CumulativeCardinalityUsage int64 `json:"cumulative_cardinality_usage"` @@ -205,3 +205,5 @@ func NewAnalyticsStatistics() *AnalyticsStatistics { return r } + +// false diff --git a/typedapi/types/analyzedetail.go b/typedapi/types/analyzedetail.go index a581cdee5f..50976cdeb9 100644 --- a/typedapi/types/analyzedetail.go +++ b/typedapi/types/analyzedetail.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AnalyzeDetail type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/analyze/types.ts#L24-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/analyze/types.ts#L24-L30 type AnalyzeDetail struct { Analyzer *AnalyzerDetail `json:"analyzer,omitempty"` Charfilters []CharFilterDetail `json:"charfilters,omitempty"` @@ -100,3 +100,5 @@ func NewAnalyzeDetail() *AnalyzeDetail { return r } + +// false diff --git a/typedapi/types/analyzer.go b/typedapi/types/analyzer.go index fb4b469888..9c74f30636 100644 --- a/typedapi/types/analyzer.go +++ b/typedapi/types/analyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,7 +25,6 @@ package types // CustomAnalyzer // FingerprintAnalyzer // KeywordAnalyzer -// LanguageAnalyzer // NoriAnalyzer // PatternAnalyzer // SimpleAnalyzer @@ -35,7 +34,47 @@ package types // IcuAnalyzer // KuromojiAnalyzer // SnowballAnalyzer +// ArabicAnalyzer +// ArmenianAnalyzer +// BasqueAnalyzer +// BengaliAnalyzer +// BrazilianAnalyzer +// BulgarianAnalyzer +// CatalanAnalyzer +// ChineseAnalyzer +// CjkAnalyzer +// CzechAnalyzer +// DanishAnalyzer // DutchAnalyzer +// EnglishAnalyzer +// EstonianAnalyzer +// FinnishAnalyzer +// FrenchAnalyzer +// GalicianAnalyzer +// GermanAnalyzer +// GreekAnalyzer +// HindiAnalyzer +// HungarianAnalyzer +// IndonesianAnalyzer +// IrishAnalyzer +// ItalianAnalyzer +// LatvianAnalyzer +// LithuanianAnalyzer +// NorwegianAnalyzer +// PersianAnalyzer +// PortugueseAnalyzer +// RomanianAnalyzer +// RussianAnalyzer +// SerbianAnalyzer +// SoraniAnalyzer +// SpanishAnalyzer +// SwedishAnalyzer +// TurkishAnalyzer +// ThaiAnalyzer // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/analyzers.ts#L113-L131 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L350-L403 type Analyzer any + +type AnalyzerVariant interface { + AnalyzerCaster() *Analyzer +} diff --git a/typedapi/types/analyzerdetail.go b/typedapi/types/analyzerdetail.go index 1b5573e6bc..43b205460d 100644 --- a/typedapi/types/analyzerdetail.go +++ b/typedapi/types/analyzerdetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AnalyzerDetail type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/analyze/types.ts#L32-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/analyze/types.ts#L32-L35 type AnalyzerDetail struct { Name string `json:"name"` Tokens []ExplainAnalyzeToken `json:"tokens"` @@ -80,3 +80,5 @@ func NewAnalyzerDetail() *AnalyzerDetail { return r } + +// false diff --git a/typedapi/types/analyzetoken.go b/typedapi/types/analyzetoken.go index 6c3b17a7b4..31ba325ed0 100644 --- a/typedapi/types/analyzetoken.go +++ b/typedapi/types/analyzetoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AnalyzeToken type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/analyze/types.ts#L37-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/analyze/types.ts#L37-L44 type AnalyzeToken struct { EndOffset int64 `json:"end_offset"` Position int64 `json:"position"` @@ -151,3 +151,5 @@ func NewAnalyzeToken() *AnalyzeToken { return r } + +// false diff --git a/typedapi/types/anomaly.go b/typedapi/types/anomaly.go index 076ca654bd..0f19ad79ad 100644 --- a/typedapi/types/anomaly.go +++ b/typedapi/types/anomaly.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Anomaly type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Anomaly.ts#L24-L121 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Anomaly.ts#L24-L121 type Anomaly struct { // Actual The actual value for the bucket. Actual []Float64 `json:"actual,omitempty"` @@ -388,3 +388,5 @@ func NewAnomaly() *Anomaly { return r } + +// false diff --git a/typedapi/types/anomalycause.go b/typedapi/types/anomalycause.go index 5a41d3a24d..656aa05201 100644 --- a/typedapi/types/anomalycause.go +++ b/typedapi/types/anomalycause.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,22 +31,23 @@ import ( // AnomalyCause type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Anomaly.ts#L123-L138 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Anomaly.ts#L123-L139 type AnomalyCause struct { - Actual []Float64 `json:"actual"` - ByFieldName string `json:"by_field_name"` - ByFieldValue string `json:"by_field_value"` - CorrelatedByFieldValue string `json:"correlated_by_field_value"` - FieldName string `json:"field_name"` - Function string `json:"function"` - FunctionDescription string `json:"function_description"` - Influencers []Influence `json:"influencers"` - OverFieldName string `json:"over_field_name"` - OverFieldValue string `json:"over_field_value"` - PartitionFieldName string `json:"partition_field_name"` - PartitionFieldValue string `json:"partition_field_value"` + Actual []Float64 `json:"actual,omitempty"` + ByFieldName *string `json:"by_field_name,omitempty"` + ByFieldValue *string `json:"by_field_value,omitempty"` + CorrelatedByFieldValue *string `json:"correlated_by_field_value,omitempty"` + FieldName *string `json:"field_name,omitempty"` + Function *string `json:"function,omitempty"` + FunctionDescription *string `json:"function_description,omitempty"` + GeoResults *GeoResults `json:"geo_results,omitempty"` + Influencers []Influence `json:"influencers,omitempty"` + OverFieldName *string `json:"over_field_name,omitempty"` + OverFieldValue *string `json:"over_field_value,omitempty"` + PartitionFieldName *string `json:"partition_field_name,omitempty"` + PartitionFieldValue *string `json:"partition_field_value,omitempty"` Probability Float64 
`json:"probability"` - Typical []Float64 `json:"typical"` + Typical []Float64 `json:"typical,omitempty"` } func (s *AnomalyCause) UnmarshalJSON(data []byte) error { @@ -84,7 +85,7 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.ByFieldValue = o + s.ByFieldValue = &o case "correlated_by_field_value": var tmp json.RawMessage @@ -96,7 +97,7 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.CorrelatedByFieldValue = o + s.CorrelatedByFieldValue = &o case "field_name": if err := dec.Decode(&s.FieldName); err != nil { @@ -113,7 +114,7 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Function = o + s.Function = &o case "function_description": var tmp json.RawMessage @@ -125,7 +126,12 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.FunctionDescription = o + s.FunctionDescription = &o + + case "geo_results": + if err := dec.Decode(&s.GeoResults); err != nil { + return fmt.Errorf("%s | %w", "GeoResults", err) + } case "influencers": if err := dec.Decode(&s.Influencers); err != nil { @@ -147,7 +153,7 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.OverFieldValue = o + s.OverFieldValue = &o case "partition_field_name": var tmp json.RawMessage @@ -159,7 +165,7 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.PartitionFieldName = o + s.PartitionFieldName = &o case "partition_field_value": var tmp json.RawMessage @@ -171,7 +177,7 @@ func (s *AnomalyCause) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.PartitionFieldValue = o + s.PartitionFieldValue = &o case "probability": var tmp any @@ -205,3 +211,5 @@ func NewAnomalyCause() *AnomalyCause { return r } + +// false diff --git a/typedapi/types/anomalydetectors.go 
b/typedapi/types/anomalydetectors.go index aeb49bc6dd..a83c475c3f 100644 --- a/typedapi/types/anomalydetectors.go +++ b/typedapi/types/anomalydetectors.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AnomalyDetectors type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/info/types.ts#L44-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/info/types.ts#L46-L52 type AnomalyDetectors struct { CategorizationAnalyzer CategorizationAnalyzer `json:"categorization_analyzer"` CategorizationExamplesLimit int `json:"categorization_examples_limit"` @@ -162,3 +162,5 @@ func NewAnomalyDetectors() *AnomalyDetectors { return r } + +// false diff --git a/typedapi/types/anomalyexplanation.go b/typedapi/types/anomalyexplanation.go index e3b3263a4a..7ad3a29e09 100644 --- a/typedapi/types/anomalyexplanation.go +++ b/typedapi/types/anomalyexplanation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AnomalyExplanation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Anomaly.ts#L156-L197 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Anomaly.ts#L157-L198 type AnomalyExplanation struct { // AnomalyCharacteristicsImpact Impact from the duration and magnitude of the detected anomaly relative to // the historical average. @@ -237,3 +237,5 @@ func NewAnomalyExplanation() *AnomalyExplanation { return r } + +// false diff --git a/typedapi/types/apikey.go b/typedapi/types/apikey.go index d8672e2dea..d28682a43e 100644 --- a/typedapi/types/apikey.go +++ b/typedapi/types/apikey.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -27,14 +27,23 @@ import ( "fmt" "io" "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/apikeytype" ) // ApiKey type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/ApiKey.ts#L27-L89 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/ApiKey.ts#L27-L113 type ApiKey struct { + // Access The access granted to cross-cluster API keys. + // The access is composed of permissions for cross cluster search and cross + // cluster replication. + // At least one of them must be specified. + // When specified, the new access assignment fully replaces the previously + // assigned access. + Access *Access `json:"access,omitempty"` // Creation Creation time for the API key in milliseconds. 
- Creation *int64 `json:"creation,omitempty"` + Creation int64 `json:"creation"` // Expiration Expiration time for the API key in milliseconds. Expiration *int64 `json:"expiration,omitempty"` // Id Id for the API key @@ -42,21 +51,23 @@ type ApiKey struct { // Invalidated Invalidation status for the API key. // If the key has been invalidated, it has a value of `true`. Otherwise, it is // `false`. - Invalidated *bool `json:"invalidated,omitempty"` + Invalidated bool `json:"invalidated"` + // Invalidation If the key has been invalidated, invalidation time in milliseconds. + Invalidation *int64 `json:"invalidation,omitempty"` // LimitedBy The owner user’s permissions associated with the API key. // It is a point-in-time snapshot captured at creation and subsequent updates. // An API key’s effective permissions are an intersection of its assigned // privileges and the owner user’s permissions. LimitedBy []map[string]RoleDescriptor `json:"limited_by,omitempty"` // Metadata Metadata of the API key - Metadata Metadata `json:"metadata,omitempty"` + Metadata Metadata `json:"metadata"` // Name Name of the API key. Name string `json:"name"` // ProfileUid The profile uid for the API key owner principal, if requested and if it // exists ProfileUid *string `json:"profile_uid,omitempty"` // Realm Realm name of the principal for which this API key was created. - Realm *string `json:"realm,omitempty"` + Realm string `json:"realm"` // RealmType Realm type of the principal for which this API key was created RealmType *string `json:"realm_type,omitempty"` // RoleDescriptors The role descriptors assigned to this API key when it was created or last @@ -64,9 +75,13 @@ type ApiKey struct { // An empty role descriptor means the API key inherits the owner user’s // permissions. 
RoleDescriptors map[string]RoleDescriptor `json:"role_descriptors,omitempty"` - Sort_ []FieldValue `json:"_sort,omitempty"` + // Sort_ Sorting values when using the `sort` parameter with the + // `security.query_api_keys` API. + Sort_ []FieldValue `json:"_sort,omitempty"` + // Type The type of the API key (e.g. `rest` or `cross_cluster`). + Type apikeytype.ApiKeyType `json:"type"` // Username Principal for which this API key was created - Username *string `json:"username,omitempty"` + Username string `json:"username"` } func (s *ApiKey) UnmarshalJSON(data []byte) error { @@ -84,34 +99,19 @@ func (s *ApiKey) UnmarshalJSON(data []byte) error { switch t { + case "access": + if err := dec.Decode(&s.Access); err != nil { + return fmt.Errorf("%s | %w", "Access", err) + } + case "creation": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Creation", err) - } - s.Creation = &value - case float64: - f := int64(v) - s.Creation = &f + if err := dec.Decode(&s.Creation); err != nil { + return fmt.Errorf("%s | %w", "Creation", err) } case "expiration": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Expiration", err) - } - s.Expiration = &value - case float64: - f := int64(v) - s.Expiration = &f + if err := dec.Decode(&s.Expiration); err != nil { + return fmt.Errorf("%s | %w", "Expiration", err) } case "id": @@ -128,9 +128,14 @@ func (s *ApiKey) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "Invalidated", err) } - s.Invalidated = &value + s.Invalidated = value case bool: - s.Invalidated = &v + s.Invalidated = v + } + + case "invalidation": + if err := dec.Decode(&s.Invalidation); err != nil { + return fmt.Errorf("%s | %w", "Invalidation", err) } case "limited_by": @@ -170,7 +175,7 @@ func (s *ApiKey) 
UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Realm = &o + s.Realm = o case "realm_type": var tmp json.RawMessage @@ -197,6 +202,11 @@ func (s *ApiKey) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Sort_", err) } + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + case "username": if err := dec.Decode(&s.Username); err != nil { return fmt.Errorf("%s | %w", "Username", err) @@ -210,8 +220,10 @@ func (s *ApiKey) UnmarshalJSON(data []byte) error { // NewApiKey returns a ApiKey. func NewApiKey() *ApiKey { r := &ApiKey{ - RoleDescriptors: make(map[string]RoleDescriptor, 0), + RoleDescriptors: make(map[string]RoleDescriptor), } return r } + +// false diff --git a/typedapi/types/apikeyaggregate.go b/typedapi/types/apikeyaggregate.go index 5b163078c0..c90d24af37 100644 --- a/typedapi/types/apikeyaggregate.go +++ b/typedapi/types/apikeyaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -36,5 +36,5 @@ package types // DateRangeAggregate // CompositeAggregate // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_api_keys/types.ts#L123-L140 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_api_keys/types.ts#L122-L139 type ApiKeyAggregate any diff --git a/typedapi/types/apikeyaggregationcontainer.go b/typedapi/types/apikeyaggregationcontainer.go index ba7d72470c..9d136b652e 100644 --- a/typedapi/types/apikeyaggregationcontainer.go +++ b/typedapi/types/apikeyaggregationcontainer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,8 +30,9 @@ import ( // ApiKeyAggregationContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_api_keys/types.ts#L64-L121 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_api_keys/types.ts#L63-L120 type ApiKeyAggregationContainer struct { + AdditionalApiKeyAggregationContainerProperty map[string]json.RawMessage `json:"-"` // Aggregations Sub-aggregations for this aggregation. // Only applies to bucket aggregations. 
Aggregations map[string]ApiKeyAggregationContainer `json:"aggregations,omitempty"` @@ -139,16 +140,69 @@ func (s *ApiKeyAggregationContainer) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "ValueCount", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalApiKeyAggregationContainerProperty == nil { + s.AdditionalApiKeyAggregationContainerProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalApiKeyAggregationContainerProperty", err) + } + s.AdditionalApiKeyAggregationContainerProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s ApiKeyAggregationContainer) MarshalJSON() ([]byte, error) { + type opt ApiKeyAggregationContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalApiKeyAggregationContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalApiKeyAggregationContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewApiKeyAggregationContainer returns a ApiKeyAggregationContainer. 
func NewApiKeyAggregationContainer() *ApiKeyAggregationContainer { r := &ApiKeyAggregationContainer{ - Aggregations: make(map[string]ApiKeyAggregationContainer, 0), + AdditionalApiKeyAggregationContainerProperty: make(map[string]json.RawMessage), + Aggregations: make(map[string]ApiKeyAggregationContainer), } return r } + +// true + +type ApiKeyAggregationContainerVariant interface { + ApiKeyAggregationContainerCaster() *ApiKeyAggregationContainer +} + +func (s *ApiKeyAggregationContainer) ApiKeyAggregationContainerCaster() *ApiKeyAggregationContainer { + return s +} diff --git a/typedapi/types/apikeyauthorization.go b/typedapi/types/apikeyauthorization.go index bff65a5392..d52438bb8b 100644 --- a/typedapi/types/apikeyauthorization.go +++ b/typedapi/types/apikeyauthorization.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ApiKeyAuthorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Authorization.ts#L20-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Authorization.ts#L20-L29 type ApiKeyAuthorization struct { // Id The identifier for the API key. Id string `json:"id"` @@ -89,3 +89,5 @@ func NewApiKeyAuthorization() *ApiKeyAuthorization { return r } + +// false diff --git a/typedapi/types/apikeyfiltersaggregation.go b/typedapi/types/apikeyfiltersaggregation.go index b3b75c3a4d..63dd309bf7 100644 --- a/typedapi/types/apikeyfiltersaggregation.go +++ b/typedapi/types/apikeyfiltersaggregation.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ApiKeyFiltersAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_api_keys/types.ts#L208-L228 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_api_keys/types.ts#L207-L227 type ApiKeyFiltersAggregation struct { // Filters Collection of queries from which to build buckets. Filters BucketsApiKeyQueryContainer `json:"filters,omitempty"` @@ -132,3 +132,13 @@ func NewApiKeyFiltersAggregation() *ApiKeyFiltersAggregation { return r } + +// true + +type ApiKeyFiltersAggregationVariant interface { + ApiKeyFiltersAggregationCaster() *ApiKeyFiltersAggregation +} + +func (s *ApiKeyFiltersAggregation) ApiKeyFiltersAggregationCaster() *ApiKeyFiltersAggregation { + return s +} diff --git a/typedapi/types/apikeyquerycontainer.go b/typedapi/types/apikeyquerycontainer.go index 0fcd9aeb3a..875a069a1f 100644 --- a/typedapi/types/apikeyquerycontainer.go +++ b/typedapi/types/apikeyquerycontainer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,9 +30,10 @@ import ( // ApiKeyQueryContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_api_keys/types.ts#L142-L206 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_api_keys/types.ts#L141-L205 type ApiKeyQueryContainer struct { - // Bool matches documents matching boolean combinations of other queries. + AdditionalApiKeyQueryContainerProperty map[string]json.RawMessage `json:"-"` + // Bool Matches documents matching boolean combinations of other queries. Bool *BoolQuery `json:"bool,omitempty"` // Exists Returns documents that contain an indexed value for a field. Exists *ExistsQuery `json:"exists,omitempty"` @@ -159,20 +160,73 @@ func (s *ApiKeyQueryContainer) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Wildcard", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalApiKeyQueryContainerProperty == nil { + s.AdditionalApiKeyQueryContainerProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalApiKeyQueryContainerProperty", err) + } + s.AdditionalApiKeyQueryContainerProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s ApiKeyQueryContainer) MarshalJSON() ([]byte, error) { + type opt ApiKeyQueryContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalApiKeyQueryContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalApiKeyQueryContainerProperty") + + data, err = 
json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewApiKeyQueryContainer returns a ApiKeyQueryContainer. func NewApiKeyQueryContainer() *ApiKeyQueryContainer { r := &ApiKeyQueryContainer{ - Match: make(map[string]MatchQuery, 0), - Prefix: make(map[string]PrefixQuery, 0), - Range: make(map[string]RangeQuery, 0), - Term: make(map[string]TermQuery, 0), - Wildcard: make(map[string]WildcardQuery, 0), + AdditionalApiKeyQueryContainerProperty: make(map[string]json.RawMessage), + Match: make(map[string]MatchQuery), + Prefix: make(map[string]PrefixQuery), + Range: make(map[string]RangeQuery), + Term: make(map[string]TermQuery), + Wildcard: make(map[string]WildcardQuery), } return r } + +// true + +type ApiKeyQueryContainerVariant interface { + ApiKeyQueryContainerCaster() *ApiKeyQueryContainer +} + +func (s *ApiKeyQueryContainer) ApiKeyQueryContainerCaster() *ApiKeyQueryContainer { + return s +} diff --git a/typedapi/types/appendprocessor.go b/typedapi/types/appendprocessor.go index f6a9fe5fb1..5f3c8dbd76 100644 --- a/typedapi/types/appendprocessor.go +++ b/typedapi/types/appendprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AppendProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L279-L294 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L328-L343 type AppendProcessor struct { // AllowDuplicates If `false`, the processor does not append values already present in the // field. 
@@ -145,8 +145,19 @@ func (s *AppendProcessor) UnmarshalJSON(data []byte) error { s.Tag = &o case "value": - if err := dec.Decode(&s.Value); err != nil { - return fmt.Errorf("%s | %w", "Value", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(json.RawMessage) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + s.Value = append(s.Value, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } } } @@ -160,3 +171,13 @@ func NewAppendProcessor() *AppendProcessor { return r } + +// true + +type AppendProcessorVariant interface { + AppendProcessorCaster() *AppendProcessor +} + +func (s *AppendProcessor) AppendProcessorCaster() *AppendProcessor { + return s +} diff --git a/typedapi/types/applicationglobaluserprivileges.go b/typedapi/types/applicationglobaluserprivileges.go index 154141ef56..443e97fa64 100644 --- a/typedapi/types/applicationglobaluserprivileges.go +++ b/typedapi/types/applicationglobaluserprivileges.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ApplicationGlobalUserPrivileges type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/Privileges.ts#L340-L342 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L434-L436 type ApplicationGlobalUserPrivileges struct { Manage ManageUserPrivileges `json:"manage"` } @@ -33,3 +33,13 @@ func NewApplicationGlobalUserPrivileges() *ApplicationGlobalUserPrivileges { return r } + +// true + +type ApplicationGlobalUserPrivilegesVariant interface { + ApplicationGlobalUserPrivilegesCaster() *ApplicationGlobalUserPrivileges +} + +func (s *ApplicationGlobalUserPrivileges) ApplicationGlobalUserPrivilegesCaster() *ApplicationGlobalUserPrivileges { + return s +} diff --git a/typedapi/types/applicationprivileges.go b/typedapi/types/applicationprivileges.go index 726752a30e..50ac80abbb 100644 --- a/typedapi/types/applicationprivileges.go +++ b/typedapi/types/applicationprivileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ApplicationPrivileges type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/Privileges.ts#L27-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L27-L40 type ApplicationPrivileges struct { // Application The name of the application to which this entry applies. 
Application string `json:"application"` @@ -90,3 +90,13 @@ func NewApplicationPrivileges() *ApplicationPrivileges { return r } + +// true + +type ApplicationPrivilegesVariant interface { + ApplicationPrivilegesCaster() *ApplicationPrivileges +} + +func (s *ApplicationPrivileges) ApplicationPrivilegesCaster() *ApplicationPrivileges { + return s +} diff --git a/typedapi/types/applicationprivilegescheck.go b/typedapi/types/applicationprivilegescheck.go index 0510df14ea..bab02c33df 100644 --- a/typedapi/types/applicationprivilegescheck.go +++ b/typedapi/types/applicationprivilegescheck.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,15 +31,15 @@ import ( // ApplicationPrivilegesCheck type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/has_privileges/types.ts#L24-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/has_privileges/types.ts#L24-L32 type ApplicationPrivilegesCheck struct { // Application The name of the application. Application string `json:"application"` // Privileges A list of the privileges that you want to check for the specified resources. - // May be either application privilege names, or the names of actions that are + // It may be either application privilege names or the names of actions that are // granted by those privileges Privileges []string `json:"privileges"` - // Resources A list of resource names against which the privileges should be checked + // Resources A list of resource names against which the privileges should be checked. 
Resources []string `json:"resources"` } @@ -91,3 +91,13 @@ func NewApplicationPrivilegesCheck() *ApplicationPrivilegesCheck { return r } + +// true + +type ApplicationPrivilegesCheckVariant interface { + ApplicationPrivilegesCheckCaster() *ApplicationPrivilegesCheck +} + +func (s *ApplicationPrivilegesCheck) ApplicationPrivilegesCheckCaster() *ApplicationPrivilegesCheck { + return s +} diff --git a/typedapi/types/applicationsprivileges.go b/typedapi/types/applicationsprivileges.go index 009f77e52e..ad6689af2f 100644 --- a/typedapi/types/applicationsprivileges.go +++ b/typedapi/types/applicationsprivileges.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ApplicationsPrivileges type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/has_privileges/types.ts#L46-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/has_privileges/types.ts#L47-L47 type ApplicationsPrivileges map[string]ResourcePrivileges diff --git a/typedapi/types/languageanalyzer.go b/typedapi/types/arabicanalyzer.go similarity index 63% rename from typedapi/types/languageanalyzer.go rename to typedapi/types/arabicanalyzer.go index 4ece5260af..15c33f1d95 100644 --- a/typedapi/types/languageanalyzer.go +++ b/typedapi/types/arabicanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -27,23 +27,19 @@ import ( "fmt" "io" "strconv" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/language" ) -// LanguageAnalyzer type. +// ArabicAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/analyzers.ts#L52-L59 -type LanguageAnalyzer struct { - Language language.Language `json:"language"` - StemExclusion []string `json:"stem_exclusion"` - Stopwords []string `json:"stopwords,omitempty"` - StopwordsPath *string `json:"stopwords_path,omitempty"` - Type string `json:"type,omitempty"` - Version *string `json:"version,omitempty"` +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L52-L57 +type ArabicAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` } -func (s *LanguageAnalyzer) UnmarshalJSON(data []byte) error { +func (s *ArabicAnalyzer) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -58,11 +54,6 @@ func (s *LanguageAnalyzer) UnmarshalJSON(data []byte) error { switch t { - case "language": - if err := dec.Decode(&s.Language); err != nil { - return fmt.Errorf("%s | %w", "Language", err) - } - case "stem_exclusion": if err := dec.Decode(&s.StemExclusion); err != nil { return fmt.Errorf("%s | %w", "StemExclusion", err) @@ -101,36 +92,39 @@ func (s *LanguageAnalyzer) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Type", err) } - case "version": - if err := dec.Decode(&s.Version); err != nil { - return 
fmt.Errorf("%s | %w", "Version", err) - } - } } return nil } // MarshalJSON override marshalling to include literal value -func (s LanguageAnalyzer) MarshalJSON() ([]byte, error) { - type innerLanguageAnalyzer LanguageAnalyzer - tmp := innerLanguageAnalyzer{ - Language: s.Language, +func (s ArabicAnalyzer) MarshalJSON() ([]byte, error) { + type innerArabicAnalyzer ArabicAnalyzer + tmp := innerArabicAnalyzer{ StemExclusion: s.StemExclusion, Stopwords: s.Stopwords, StopwordsPath: s.StopwordsPath, Type: s.Type, - Version: s.Version, } - tmp.Type = "language" + tmp.Type = "arabic" return json.Marshal(tmp) } -// NewLanguageAnalyzer returns a LanguageAnalyzer. -func NewLanguageAnalyzer() *LanguageAnalyzer { - r := &LanguageAnalyzer{} +// NewArabicAnalyzer returns a ArabicAnalyzer. +func NewArabicAnalyzer() *ArabicAnalyzer { + r := &ArabicAnalyzer{} return r } + +// true + +type ArabicAnalyzerVariant interface { + ArabicAnalyzerCaster() *ArabicAnalyzer +} + +func (s *ArabicAnalyzer) ArabicAnalyzerCaster() *ArabicAnalyzer { + return s +} diff --git a/typedapi/types/archive.go b/typedapi/types/archive.go index ddda129481..63a465fbc4 100644 --- a/typedapi/types/archive.go +++ b/typedapi/types/archive.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Archive type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L48-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L45-L47 type Archive struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -107,3 +107,5 @@ func NewArchive() *Archive { return r } + +// false diff --git a/typedapi/types/armeniananalyzer.go b/typedapi/types/armeniananalyzer.go new file mode 100644 index 0000000000..7caef9e7ee --- /dev/null +++ b/typedapi/types/armeniananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ArmenianAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L59-L64 +type ArmenianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ArmenianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ArmenianAnalyzer) MarshalJSON() ([]byte, error) { + type innerArmenianAnalyzer ArmenianAnalyzer + tmp := innerArmenianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "armenian" + + 
return json.Marshal(tmp) +} + +// NewArmenianAnalyzer returns a ArmenianAnalyzer. +func NewArmenianAnalyzer() *ArmenianAnalyzer { + r := &ArmenianAnalyzer{} + + return r +} + +// true + +type ArmenianAnalyzerVariant interface { + ArmenianAnalyzerCaster() *ArmenianAnalyzer +} + +func (s *ArmenianAnalyzer) ArmenianAnalyzerCaster() *ArmenianAnalyzer { + return s +} diff --git a/typedapi/types/arraycomparecondition.go b/typedapi/types/arraycomparecondition.go index 0c1fc35c7e..cbd594f313 100644 --- a/typedapi/types/arraycomparecondition.go +++ b/typedapi/types/arraycomparecondition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // ArrayCompareCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Conditions.ts#L32-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Conditions.ts#L32-L39 type ArrayCompareCondition struct { ArrayCompareCondition map[conditionop.ConditionOp]ArrayCompareOpParams `json:"-"` Path string `json:"path"` @@ -121,8 +121,18 @@ func (s ArrayCompareCondition) MarshalJSON() ([]byte, error) { // NewArrayCompareCondition returns a ArrayCompareCondition. 
func NewArrayCompareCondition() *ArrayCompareCondition { r := &ArrayCompareCondition{ - ArrayCompareCondition: make(map[conditionop.ConditionOp]ArrayCompareOpParams, 0), + ArrayCompareCondition: make(map[conditionop.ConditionOp]ArrayCompareOpParams), } return r } + +// true + +type ArrayCompareConditionVariant interface { + ArrayCompareConditionCaster() *ArrayCompareCondition +} + +func (s *ArrayCompareCondition) ArrayCompareConditionCaster() *ArrayCompareCondition { + return s +} diff --git a/typedapi/types/arraycompareopparams.go b/typedapi/types/arraycompareopparams.go index bc7d026586..26674a3468 100644 --- a/typedapi/types/arraycompareopparams.go +++ b/typedapi/types/arraycompareopparams.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // ArrayCompareOpParams type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Conditions.ts#L27-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Conditions.ts#L27-L30 type ArrayCompareOpParams struct { Quantifier quantifier.Quantifier `json:"quantifier"` Value FieldValue `json:"value"` @@ -74,3 +74,13 @@ func NewArrayCompareOpParams() *ArrayCompareOpParams { return r } + +// true + +type ArrayCompareOpParamsVariant interface { + ArrayCompareOpParamsCaster() *ArrayCompareOpParams +} + +func (s *ArrayCompareOpParams) ArrayCompareOpParamsCaster() *ArrayCompareOpParams { + return s +} diff --git a/typedapi/types/arraypercentilesitem.go b/typedapi/types/arraypercentilesitem.go index bc8e78ce5b..6368c29067 100644 --- a/typedapi/types/arraypercentilesitem.go +++ b/typedapi/types/arraypercentilesitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ArrayPercentilesItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L160-L164 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L162-L166 type ArrayPercentilesItem struct { Key string `json:"key"` Value *Float64 `json:"value,omitempty"` @@ -93,3 +93,5 @@ func NewArrayPercentilesItem() *ArrayPercentilesItem { return r } + +// false diff --git a/typedapi/types/asciifoldingtokenfilter.go b/typedapi/types/asciifoldingtokenfilter.go index 82ed15fb8a..93fedb200a 100644 --- a/typedapi/types/asciifoldingtokenfilter.go +++ b/typedapi/types/asciifoldingtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // AsciiFoldingTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L170-L173 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L169-L172 type AsciiFoldingTokenFilter struct { PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewAsciiFoldingTokenFilter() *AsciiFoldingTokenFilter { return r } + +// true + +type AsciiFoldingTokenFilterVariant interface { + AsciiFoldingTokenFilterCaster() *AsciiFoldingTokenFilter +} + +func (s *AsciiFoldingTokenFilter) AsciiFoldingTokenFilterCaster() *AsciiFoldingTokenFilter { + return s +} diff --git a/typedapi/types/asyncsearch.go b/typedapi/types/asyncsearch.go index 4cdebc694b..6fe8626767 100644 --- a/typedapi/types/asyncsearch.go +++ b/typedapi/types/asyncsearch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,10 +32,10 @@ import ( // AsyncSearch type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/async_search/_types/AsyncSearch.ts#L30-L56 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/async_search/_types/AsyncSearch.ts#L30-L56 type AsyncSearch struct { // Aggregations Partial aggregations results, coming from the shards that have already - // completed the execution of the query. + // completed running the query. 
Aggregations map[string]Aggregate `json:"aggregations,omitempty"` Clusters_ *ClusterStatistics `json:"_clusters,omitempty"` Fields map[string]json.RawMessage `json:"fields,omitempty"` @@ -502,6 +502,13 @@ func (s *AsyncSearch) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -782,10 +789,12 @@ func (s *AsyncSearch) UnmarshalJSON(data []byte) error { // NewAsyncSearch returns a AsyncSearch. func NewAsyncSearch() *AsyncSearch { r := &AsyncSearch{ - Aggregations: make(map[string]Aggregate, 0), - Fields: make(map[string]json.RawMessage, 0), - Suggest: make(map[string][]Suggest, 0), + Aggregations: make(map[string]Aggregate), + Fields: make(map[string]json.RawMessage), + Suggest: make(map[string][]Suggest), } return r } + +// false diff --git a/typedapi/types/attachmentprocessor.go b/typedapi/types/attachmentprocessor.go index 7a1d707d91..3ccdc0fe66 100644 --- a/typedapi/types/attachmentprocessor.go +++ b/typedapi/types/attachmentprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AttachmentProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L296-L337 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L345-L386 type AttachmentProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -226,3 +226,13 @@ func NewAttachmentProcessor() *AttachmentProcessor { return r } + +// true + +type AttachmentProcessorVariant interface { + AttachmentProcessorCaster() *AttachmentProcessor +} + +func (s *AttachmentProcessor) AttachmentProcessorCaster() *AttachmentProcessor { + return s +} diff --git a/typedapi/types/audit.go b/typedapi/types/audit.go index 3b792c61ce..11244ff629 100644 --- a/typedapi/types/audit.go +++ b/typedapi/types/audit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Audit type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L73-L75 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L70-L72 type Audit struct { Enabled bool `json:"enabled"` Outputs []string `json:"outputs,omitempty"` @@ -82,3 +82,5 @@ func NewAudit() *Audit { return r } + +// false diff --git a/typedapi/types/nodereloaderror.go b/typedapi/types/authenticateapikey.go similarity index 63% rename from typedapi/types/nodereloaderror.go rename to typedapi/types/authenticateapikey.go index 3b1c9cf8da..7e221add06 100644 --- a/typedapi/types/nodereloaderror.go +++ b/typedapi/types/authenticateapikey.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,15 +28,15 @@ import ( "io" ) -// NodeReloadError type. +// AuthenticateApiKey type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/NodeReloadResult.ts#L24-L27 -type NodeReloadError struct { - Name string `json:"name"` - ReloadException *ErrorCause `json:"reload_exception,omitempty"` +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/authenticate/SecurityAuthenticateResponse.ts#L44-L47 +type AuthenticateApiKey struct { + Id string `json:"id"` + Name *string `json:"name,omitempty"` } -func (s *NodeReloadError) UnmarshalJSON(data []byte) error { +func (s *AuthenticateApiKey) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -51,24 +51,26 @@ func (s *NodeReloadError) UnmarshalJSON(data []byte) error { switch t { + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + case "name": if err := dec.Decode(&s.Name); err != nil { return fmt.Errorf("%s | %w", "Name", err) } - case "reload_exception": - if err := dec.Decode(&s.ReloadException); err != nil { - return fmt.Errorf("%s | %w", "ReloadException", err) - } - } } return nil } -// NewNodeReloadError returns a NodeReloadError. -func NewNodeReloadError() *NodeReloadError { - r := &NodeReloadError{} +// NewAuthenticateApiKey returns a AuthenticateApiKey. +func NewAuthenticateApiKey() *AuthenticateApiKey { + r := &AuthenticateApiKey{} return r } + +// false diff --git a/typedapi/types/authenticateduser.go b/typedapi/types/authenticateduser.go index bb4c2a364d..827a4463a3 100644 --- a/typedapi/types/authenticateduser.go +++ b/typedapi/types/authenticateduser.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AuthenticatedUser type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_token/types.ts#L40-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_token/types.ts#L60-L65 type AuthenticatedUser struct { AuthenticationProvider *AuthenticationProvider `json:"authentication_provider,omitempty"` AuthenticationRealm UserRealm `json:"authentication_realm"` @@ -150,3 +150,5 @@ func NewAuthenticatedUser() *AuthenticatedUser { return r } + +// false diff --git a/typedapi/types/authenticatetoken.go b/typedapi/types/authenticatetoken.go index 9f63eb3a84..5eb710c5be 100644 --- a/typedapi/types/authenticatetoken.go +++ b/typedapi/types/authenticatetoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AuthenticateToken type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/authenticate/types.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/authenticate/types.ts#L22-L29 type AuthenticateToken struct { Name string `json:"name"` Type *string `json:"type,omitempty"` @@ -80,3 +80,5 @@ func NewAuthenticateToken() *AuthenticateToken { return r } + +// false diff --git a/typedapi/types/authentication.go b/typedapi/types/authentication.go new file mode 100644 index 0000000000..2a5fb789c0 --- /dev/null +++ b/typedapi/types/authentication.go @@ -0,0 +1,177 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Authentication type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/delegate_pki/SecurityDelegatePkiResponse.ts#L43-L55 +type Authentication struct { + ApiKey map[string]string `json:"api_key,omitempty"` + AuthenticationRealm AuthenticationRealm `json:"authentication_realm"` + AuthenticationType string `json:"authentication_type"` + Email *string `json:"email,omitempty"` + Enabled bool `json:"enabled"` + FullName *string `json:"full_name,omitempty"` + LookupRealm AuthenticationRealm `json:"lookup_realm"` + Metadata Metadata `json:"metadata"` + Roles []string `json:"roles"` + Token map[string]string `json:"token,omitempty"` + Username string `json:"username"` +} + +func (s *Authentication) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + if s.ApiKey == nil { + s.ApiKey = make(map[string]string, 0) + } + if err := dec.Decode(&s.ApiKey); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + + case "authentication_realm": + if err := dec.Decode(&s.AuthenticationRealm); err != nil { + return fmt.Errorf("%s | %w", "AuthenticationRealm", err) + } + + case "authentication_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AuthenticationType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AuthenticationType = o + + case "email": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Email", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Email = &o + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + 
return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "full_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FullName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FullName = &o + + case "lookup_realm": + if err := dec.Decode(&s.LookupRealm); err != nil { + return fmt.Errorf("%s | %w", "LookupRealm", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "token": + if s.Token == nil { + s.Token = make(map[string]string, 0) + } + if err := dec.Decode(&s.Token); err != nil { + return fmt.Errorf("%s | %w", "Token", err) + } + + case "username": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Username = o + + } + } + return nil +} + +// NewAuthentication returns a Authentication. +func NewAuthentication() *Authentication { + r := &Authentication{ + ApiKey: make(map[string]string), + Token: make(map[string]string), + } + + return r +} + +// false diff --git a/typedapi/types/authenticationprovider.go b/typedapi/types/authenticationprovider.go index 241a3b3334..8da9ec8592 100644 --- a/typedapi/types/authenticationprovider.go +++ b/typedapi/types/authenticationprovider.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AuthenticationProvider type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_token/types.ts#L35-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_token/types.ts#L55-L58 type AuthenticationProvider struct { Name string `json:"name"` Type string `json:"type"` @@ -80,3 +80,5 @@ func NewAuthenticationProvider() *AuthenticationProvider { return r } + +// false diff --git a/typedapi/types/authenticationrealm.go b/typedapi/types/authenticationrealm.go new file mode 100644 index 0000000000..b6e0fddfbb --- /dev/null +++ b/typedapi/types/authenticationrealm.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AuthenticationRealm type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/delegate_pki/SecurityDelegatePkiResponse.ts#L57-L61 +type AuthenticationRealm struct { + Domain *string `json:"domain,omitempty"` + Name string `json:"name"` + Type string `json:"type"` +} + +func (s *AuthenticationRealm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "domain": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Domain", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Domain = &o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewAuthenticationRealm returns a AuthenticationRealm. 
+func NewAuthenticationRealm() *AuthenticationRealm { + r := &AuthenticationRealm{} + + return r +} + +// false diff --git a/typedapi/types/autodatehistogramaggregate.go b/typedapi/types/autodatehistogramaggregate.go index 28338fff07..e19940ca31 100644 --- a/typedapi/types/autodatehistogramaggregate.go +++ b/typedapi/types/autodatehistogramaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // AutoDateHistogramAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L358-L362 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L393-L400 type AutoDateHistogramAggregate struct { Buckets BucketsDateHistogramBucket `json:"buckets"` Interval string `json:"interval"` @@ -94,3 +94,5 @@ func NewAutoDateHistogramAggregate() *AutoDateHistogramAggregate { return r } + +// false diff --git a/typedapi/types/autodatehistogramaggregation.go b/typedapi/types/autodatehistogramaggregation.go index 4e30ca03c1..94a00c2d38 100644 --- a/typedapi/types/autodatehistogramaggregation.go +++ b/typedapi/types/autodatehistogramaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // AutoDateHistogramAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L67-L102 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L72-L110 type AutoDateHistogramAggregation struct { // Buckets The target number of buckets. Buckets *int `json:"buckets,omitempty"` @@ -154,8 +154,18 @@ func (s *AutoDateHistogramAggregation) UnmarshalJSON(data []byte) error { // NewAutoDateHistogramAggregation returns a AutoDateHistogramAggregation. func NewAutoDateHistogramAggregation() *AutoDateHistogramAggregation { r := &AutoDateHistogramAggregation{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type AutoDateHistogramAggregationVariant interface { + AutoDateHistogramAggregationCaster() *AutoDateHistogramAggregation +} + +func (s *AutoDateHistogramAggregation) AutoDateHistogramAggregationCaster() *AutoDateHistogramAggregation { + return s +} diff --git a/typedapi/types/autofollowedcluster.go b/typedapi/types/autofollowedcluster.go index b861bf2930..fac983a8c5 100644 --- a/typedapi/types/autofollowedcluster.go +++ b/typedapi/types/autofollowedcluster.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // AutoFollowedCluster type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/stats/types.ts.ts#L27-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/stats/types.ts.ts#L26-L30 type AutoFollowedCluster struct { ClusterName string `json:"cluster_name"` LastSeenMetadataVersion int64 `json:"last_seen_metadata_version"` @@ -78,3 +78,5 @@ func NewAutoFollowedCluster() *AutoFollowedCluster { return r } + +// false diff --git a/typedapi/types/autofollowpattern.go b/typedapi/types/autofollowpattern.go index 483d98763b..6fec07ed2d 100644 --- a/typedapi/types/autofollowpattern.go +++ b/typedapi/types/autofollowpattern.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // AutoFollowPattern type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/get_auto_follow_pattern/types.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/get_auto_follow_pattern/types.ts#L23-L26 type AutoFollowPattern struct { Name string `json:"name"` Pattern AutoFollowPatternSummary `json:"pattern"` @@ -72,3 +72,5 @@ func NewAutoFollowPattern() *AutoFollowPattern { return r } + +// false diff --git a/typedapi/types/autofollowpatternsummary.go b/typedapi/types/autofollowpatternsummary.go index 5573399375..afa8dc6171 100644 --- a/typedapi/types/autofollowpatternsummary.go +++ b/typedapi/types/autofollowpatternsummary.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AutoFollowPatternSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/get_auto_follow_pattern/types.ts#L28-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/get_auto_follow_pattern/types.ts#L28-L52 type AutoFollowPatternSummary struct { Active bool `json:"active"` // FollowIndexPattern The name of follower index. @@ -131,3 +131,5 @@ func NewAutoFollowPatternSummary() *AutoFollowPatternSummary { return r } + +// false diff --git a/typedapi/types/autofollowstats.go b/typedapi/types/autofollowstats.go index 40bf1aed33..66c1395c92 100644 --- a/typedapi/types/autofollowstats.go +++ b/typedapi/types/autofollowstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,13 +31,22 @@ import ( // AutoFollowStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/stats/types.ts.ts#L33-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/stats/types.ts.ts#L32-L47 type AutoFollowStats struct { - AutoFollowedClusters []AutoFollowedCluster `json:"auto_followed_clusters"` - NumberOfFailedFollowIndices int64 `json:"number_of_failed_follow_indices"` - NumberOfFailedRemoteClusterStateRequests int64 `json:"number_of_failed_remote_cluster_state_requests"` - NumberOfSuccessfulFollowIndices int64 `json:"number_of_successful_follow_indices"` - RecentAutoFollowErrors []ErrorCause `json:"recent_auto_follow_errors"` + AutoFollowedClusters []AutoFollowedCluster `json:"auto_followed_clusters"` + // NumberOfFailedFollowIndices The number of indices that the auto-follow coordinator failed to + // automatically follow. + // The causes of recent failures are captured in the logs of the elected master + // node and in the `auto_follow_stats.recent_auto_follow_errors` field. + NumberOfFailedFollowIndices int64 `json:"number_of_failed_follow_indices"` + // NumberOfFailedRemoteClusterStateRequests The number of times that the auto-follow coordinator failed to retrieve the + // cluster state from a remote cluster registered in a collection of auto-follow + // patterns. + NumberOfFailedRemoteClusterStateRequests int64 `json:"number_of_failed_remote_cluster_state_requests"` + // NumberOfSuccessfulFollowIndices The number of indices that the auto-follow coordinator successfully followed. + NumberOfSuccessfulFollowIndices int64 `json:"number_of_successful_follow_indices"` + // RecentAutoFollowErrors An array of objects representing failures by the auto-follow coordinator. 
+ RecentAutoFollowErrors []ErrorCause `json:"recent_auto_follow_errors"` } func (s *AutoFollowStats) UnmarshalJSON(data []byte) error { @@ -121,3 +130,5 @@ func NewAutoFollowStats() *AutoFollowStats { return r } + +// false diff --git a/typedapi/types/autoscalingcapacity.go b/typedapi/types/autoscalingcapacity.go index 5e270ad564..14b32bbde8 100644 --- a/typedapi/types/autoscalingcapacity.go +++ b/typedapi/types/autoscalingcapacity.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // AutoscalingCapacity type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L38-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L38-L41 type AutoscalingCapacity struct { Node AutoscalingResources `json:"node"` Total AutoscalingResources `json:"total"` @@ -34,3 +34,5 @@ func NewAutoscalingCapacity() *AutoscalingCapacity { return r } + +// false diff --git a/typedapi/types/autoscalingdecider.go b/typedapi/types/autoscalingdecider.go index af80433838..5b8f7240ad 100644 --- a/typedapi/types/autoscalingdecider.go +++ b/typedapi/types/autoscalingdecider.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AutoscalingDecider type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L52-L56 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L52-L56 type AutoscalingDecider struct { ReasonDetails json.RawMessage `json:"reason_details,omitempty"` ReasonSummary *string `json:"reason_summary,omitempty"` @@ -86,3 +86,5 @@ func NewAutoscalingDecider() *AutoscalingDecider { return r } + +// false diff --git a/typedapi/types/autoscalingdeciders.go b/typedapi/types/autoscalingdeciders.go index faf3977ec4..67b47aeca0 100644 --- a/typedapi/types/autoscalingdeciders.go +++ b/typedapi/types/autoscalingdeciders.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // AutoscalingDeciders type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L31-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L31-L36 type AutoscalingDeciders struct { CurrentCapacity AutoscalingCapacity `json:"current_capacity"` CurrentNodes []AutoscalingNode `json:"current_nodes"` @@ -33,8 +33,10 @@ type AutoscalingDeciders struct { // NewAutoscalingDeciders returns a AutoscalingDeciders. func NewAutoscalingDeciders() *AutoscalingDeciders { r := &AutoscalingDeciders{ - Deciders: make(map[string]AutoscalingDecider, 0), + Deciders: make(map[string]AutoscalingDecider), } return r } + +// false diff --git a/typedapi/types/autoscalingnode.go b/typedapi/types/autoscalingnode.go index ef30d5bf0d..ba87404cb4 100644 --- a/typedapi/types/autoscalingnode.go +++ b/typedapi/types/autoscalingnode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // AutoscalingNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L48-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L48-L50 type AutoscalingNode struct { Name string `json:"name"` } @@ -66,3 +66,5 @@ func NewAutoscalingNode() *AutoscalingNode { return r } + +// false diff --git a/typedapi/types/autoscalingpolicy.go b/typedapi/types/autoscalingpolicy.go index 01185f1ecb..7a146eaacf 100644 --- a/typedapi/types/autoscalingpolicy.go +++ b/typedapi/types/autoscalingpolicy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,9 +26,9 @@ import ( // AutoscalingPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/autoscaling/_types/AutoscalingPolicy.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/autoscaling/_types/AutoscalingPolicy.ts#L23-L30 type AutoscalingPolicy struct { - // Deciders Decider settings + // Deciders Decider settings. Deciders map[string]json.RawMessage `json:"deciders"` Roles []string `json:"roles"` } @@ -36,8 +36,18 @@ type AutoscalingPolicy struct { // NewAutoscalingPolicy returns a AutoscalingPolicy. 
func NewAutoscalingPolicy() *AutoscalingPolicy { r := &AutoscalingPolicy{ - Deciders: make(map[string]json.RawMessage, 0), + Deciders: make(map[string]json.RawMessage), } return r } + +// true + +type AutoscalingPolicyVariant interface { + AutoscalingPolicyCaster() *AutoscalingPolicy +} + +func (s *AutoscalingPolicy) AutoscalingPolicyCaster() *AutoscalingPolicy { + return s +} diff --git a/typedapi/types/autoscalingresources.go b/typedapi/types/autoscalingresources.go index 34d16f1dca..321e82f4ad 100644 --- a/typedapi/types/autoscalingresources.go +++ b/typedapi/types/autoscalingresources.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AutoscalingResources type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L43-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L43-L46 type AutoscalingResources struct { Memory int `json:"memory"` Storage int `json:"storage"` @@ -95,3 +95,5 @@ func NewAutoscalingResources() *AutoscalingResources { return r } + +// false diff --git a/typedapi/types/averageaggregation.go b/typedapi/types/averageaggregation.go index bcbbfdff66..e3d6a88d9d 100644 --- a/typedapi/types/averageaggregation.go +++ b/typedapi/types/averageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L55-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L55-L55 type AverageAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -95,3 +95,13 @@ func NewAverageAggregation() *AverageAggregation { return r } + +// true + +type AverageAggregationVariant interface { + AverageAggregationCaster() *AverageAggregation +} + +func (s *AverageAggregation) AverageAggregationCaster() *AverageAggregation { + return s +} diff --git a/typedapi/types/averagebucketaggregation.go b/typedapi/types/averagebucketaggregation.go index ade946c371..2b0469968e 100644 --- a/typedapi/types/averagebucketaggregation.go +++ b/typedapi/types/averagebucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // AverageBucketAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L78-L78 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L78-L81 type AverageBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewAverageBucketAggregation() *AverageBucketAggregation { return r } + +// true + +type AverageBucketAggregationVariant interface { + AverageBucketAggregationCaster() *AverageBucketAggregation +} + +func (s *AverageBucketAggregation) AverageBucketAggregationCaster() *AverageBucketAggregation { + return s +} diff --git a/typedapi/types/avgaggregate.go b/typedapi/types/avgaggregate.go index 9cc20740f8..b41349aba7 100644 --- a/typedapi/types/avgaggregate.go +++ b/typedapi/types/avgaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AvgAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L209-L210 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L218-L222 type AvgAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. 
A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewAvgAggregate() *AvgAggregate { return r } + +// false diff --git a/typedapi/types/azurerepository.go b/typedapi/types/azurerepository.go index 7f3cc4c092..37b9387674 100644 --- a/typedapi/types/azurerepository.go +++ b/typedapi/types/azurerepository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // AzureRepository type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotRepository.ts#L40-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotRepository.ts#L40-L43 type AzureRepository struct { Settings AzureRepositorySettings `json:"settings"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewAzureRepository() *AzureRepository { return r } + +// true + +type AzureRepositoryVariant interface { + AzureRepositoryCaster() *AzureRepository +} + +func (s *AzureRepository) AzureRepositoryCaster() *AzureRepository { + return s +} diff --git a/typedapi/types/azurerepositorysettings.go b/typedapi/types/azurerepositorysettings.go index c12b0ce289..c52d27c1f4 100644 --- a/typedapi/types/azurerepositorysettings.go +++ b/typedapi/types/azurerepositorysettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // AzureRepositorySettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotRepository.ts#L77-L83 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotRepository.ts#L77-L83 type AzureRepositorySettings struct { BasePath *string `json:"base_path,omitempty"` ChunkSize ByteSize `json:"chunk_size,omitempty"` @@ -161,3 +161,13 @@ func NewAzureRepositorySettings() *AzureRepositorySettings { return r } + +// true + +type AzureRepositorySettingsVariant interface { + AzureRepositorySettingsCaster() *AzureRepositorySettings +} + +func (s *AzureRepositorySettings) AzureRepositorySettingsCaster() *AzureRepositorySettings { + return s +} diff --git a/typedapi/types/base.go b/typedapi/types/base.go index 1923bf93b9..f3a2802ab2 100644 --- a/typedapi/types/base.go +++ b/typedapi/types/base.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Base type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L27-L30 type Base struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -91,3 +91,5 @@ func NewBase() *Base { return r } + +// false diff --git a/typedapi/types/basqueanalyzer.go b/typedapi/types/basqueanalyzer.go new file mode 100644 index 0000000000..ff51f1c879 --- /dev/null +++ b/typedapi/types/basqueanalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BasqueAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L66-L71 +type BasqueAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *BasqueAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s BasqueAnalyzer) MarshalJSON() ([]byte, error) { + type innerBasqueAnalyzer BasqueAnalyzer + tmp := innerBasqueAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "basque" + + return 
json.Marshal(tmp) +} + +// NewBasqueAnalyzer returns a BasqueAnalyzer. +func NewBasqueAnalyzer() *BasqueAnalyzer { + r := &BasqueAnalyzer{} + + return r +} + +// true + +type BasqueAnalyzerVariant interface { + BasqueAnalyzerCaster() *BasqueAnalyzer +} + +func (s *BasqueAnalyzer) BasqueAnalyzerCaster() *BasqueAnalyzer { + return s +} diff --git a/typedapi/types/bengalianalyzer.go b/typedapi/types/bengalianalyzer.go new file mode 100644 index 0000000000..0512abe09e --- /dev/null +++ b/typedapi/types/bengalianalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BengaliAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L73-L78 +type BengaliAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *BengaliAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s BengaliAnalyzer) MarshalJSON() ([]byte, error) { + type innerBengaliAnalyzer BengaliAnalyzer + tmp := innerBengaliAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "bengali" + + return 
json.Marshal(tmp) +} + +// NewBengaliAnalyzer returns a BengaliAnalyzer. +func NewBengaliAnalyzer() *BengaliAnalyzer { + r := &BengaliAnalyzer{} + + return r +} + +// true + +type BengaliAnalyzerVariant interface { + BengaliAnalyzerCaster() *BengaliAnalyzer +} + +func (s *BengaliAnalyzer) BengaliAnalyzerCaster() *BengaliAnalyzer { + return s +} diff --git a/typedapi/types/binaryproperty.go b/typedapi/types/binaryproperty.go index 80874aaf5d..1e992c59eb 100644 --- a/typedapi/types/binaryproperty.go +++ b/typedapi/types/binaryproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // BinaryProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L55-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L54-L56 type BinaryProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -41,11 +42,11 @@ type BinaryProperty struct { Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *BinaryProperty) UnmarshalJSON(data []byte) error { @@ -117,301 +118,313 @@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -460,318 +473,318 @@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -786,6 +799,11 @@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -800,16 +818,16 @@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { func (s BinaryProperty) MarshalJSON() ([]byte, error) { type innerBinaryProperty BinaryProperty tmp := innerBinaryProperty{ - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "binary" @@ -820,10 +838,20 @@ func (s BinaryProperty) 
MarshalJSON() ([]byte, error) { // NewBinaryProperty returns a BinaryProperty. func NewBinaryProperty() *BinaryProperty { r := &BinaryProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type BinaryPropertyVariant interface { + BinaryPropertyCaster() *BinaryProperty +} + +func (s *BinaryProperty) BinaryPropertyCaster() *BinaryProperty { + return s +} diff --git a/typedapi/types/blobdetails.go b/typedapi/types/blobdetails.go new file mode 100644 index 0000000000..7ee808d151 --- /dev/null +++ b/typedapi/types/blobdetails.go @@ -0,0 +1,178 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BlobDetails type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L250-L284 +type BlobDetails struct { + // Name The name of the blob. + Name string `json:"name"` + // Overwritten Indicates whether the blob was overwritten while the read operations were + // ongoing. + // + // /** + Overwritten bool `json:"overwritten"` + ReadEarly bool `json:"read_early"` + // ReadEnd The position, in bytes, at which read operations completed. + ReadEnd int64 `json:"read_end"` + // ReadStart The position, in bytes, at which read operations started. + ReadStart int64 `json:"read_start"` + // Reads A description of every read operation performed on the blob. + Reads ReadBlobDetails `json:"reads"` + // Size The size of the blob. + Size ByteSize `json:"size"` + // SizeBytes The size of the blob in bytes. + SizeBytes int64 `json:"size_bytes"` +} + +func (s *BlobDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "overwritten": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Overwritten", err) + } + s.Overwritten = value + case bool: + s.Overwritten = v + } + + case "read_early": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ReadEarly", err) + } + s.ReadEarly = value + case bool: + s.ReadEarly = v + } + + case "read_end": + var 
tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReadEnd", err) + } + s.ReadEnd = value + case float64: + f := int64(v) + s.ReadEnd = f + } + + case "read_start": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReadStart", err) + } + s.ReadStart = value + case float64: + f := int64(v) + s.ReadStart = f + } + + case "reads": + if err := dec.Decode(&s.Reads); err != nil { + return fmt.Errorf("%s | %w", "Reads", err) + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + case "size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SizeBytes", err) + } + s.SizeBytes = value + case float64: + f := int64(v) + s.SizeBytes = f + } + + } + } + return nil +} + +// NewBlobDetails returns a BlobDetails. +func NewBlobDetails() *BlobDetails { + r := &BlobDetails{} + + return r +} + +// false diff --git a/typedapi/types/booleanproperty.go b/typedapi/types/booleanproperty.go index 932d7ebb3f..801aa85a16 100644 --- a/typedapi/types/booleanproperty.go +++ b/typedapi/types/booleanproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // BooleanProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L59-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L58-L64 type BooleanProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -44,12 +45,12 @@ type BooleanProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *bool `json:"null_value,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *bool `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *BooleanProperty) UnmarshalJSON(data []byte) error { @@ -142,301 +143,313 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { 
- return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -513,318 +526,318 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -839,6 +852,11 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -853,20 +871,20 @@ func (s 
*BooleanProperty) UnmarshalJSON(data []byte) error { func (s BooleanProperty) MarshalJSON() ([]byte, error) { type innerBooleanProperty BooleanProperty tmp := innerBooleanProperty{ - Boost: s.Boost, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fielddata: s.Fielddata, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - NullValue: s.NullValue, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fielddata: s.Fielddata, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "boolean" @@ -877,10 +895,20 @@ func (s BooleanProperty) MarshalJSON() ([]byte, error) { // NewBooleanProperty returns a BooleanProperty. func NewBooleanProperty() *BooleanProperty { r := &BooleanProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type BooleanPropertyVariant interface { + BooleanPropertyCaster() *BooleanProperty +} + +func (s *BooleanProperty) BooleanPropertyCaster() *BooleanProperty { + return s +} diff --git a/typedapi/types/boolquery.go b/typedapi/types/boolquery.go index 5e32b6d9d6..6e2492163c 100644 --- a/typedapi/types/boolquery.go +++ b/typedapi/types/boolquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // BoolQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L29-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L29-L56 type BoolQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -179,3 +179,13 @@ func NewBoolQuery() *BoolQuery { return r } + +// true + +type BoolQueryVariant interface { + BoolQueryCaster() *BoolQuery +} + +func (s *BoolQuery) BoolQueryCaster() *BoolQuery { + return s +} diff --git a/typedapi/types/boostingquery.go b/typedapi/types/boostingquery.go index 14ddfe64ec..fcef289848 100644 --- a/typedapi/types/boostingquery.go +++ b/typedapi/types/boostingquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // BoostingQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L55-L68 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L58-L74 type BoostingQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -40,12 +40,12 @@ type BoostingQuery struct { // A value greater than 1.0 increases the relevance score. Boost *float32 `json:"boost,omitempty"` // Negative Query used to decrease the relevance score of matching documents. - Negative *Query `json:"negative,omitempty"` + Negative Query `json:"negative"` // NegativeBoost Floating point number between 0 and 1.0 used to decrease the relevance scores // of documents matching the `negative` query. NegativeBoost Float64 `json:"negative_boost"` // Positive Any returned documents must match this query. - Positive *Query `json:"positive,omitempty"` + Positive Query `json:"positive"` QueryName_ *string `json:"_name,omitempty"` } @@ -129,3 +129,13 @@ func NewBoostingQuery() *BoostingQuery { return r } + +// true + +type BoostingQueryVariant interface { + BoostingQueryCaster() *BoostingQuery +} + +func (s *BoostingQuery) BoostingQueryCaster() *BoostingQuery { + return s +} diff --git a/typedapi/types/boxplotaggregate.go b/typedapi/types/boxplotaggregate.go index 56c7c445e9..d12fde5e6d 100644 --- a/typedapi/types/boxplotaggregate.go +++ b/typedapi/types/boxplotaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // BoxPlotAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L713-L729 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L806-L825 type BoxPlotAggregate struct { Lower Float64 `json:"lower"` LowerAsString *string `json:"lower_as_string,omitempty"` @@ -277,3 +277,5 @@ func NewBoxPlotAggregate() *BoxPlotAggregate { return r } + +// false diff --git a/typedapi/types/boxplotaggregation.go b/typedapi/types/boxplotaggregation.go index f2f799669c..b3c30a9e6b 100644 --- a/typedapi/types/boxplotaggregation.go +++ b/typedapi/types/boxplotaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // BoxplotAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L57-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L57-L62 type BoxplotAggregation struct { // Compression Limits the maximum number of nodes used by the underlying TDigest algorithm // to `20 * compression`, enabling control of memory usage and approximation @@ -102,3 +102,13 @@ func NewBoxplotAggregation() *BoxplotAggregation { return r } + +// true + +type BoxplotAggregationVariant interface { + BoxplotAggregationCaster() *BoxplotAggregation +} + +func (s *BoxplotAggregation) BoxplotAggregationCaster() *BoxplotAggregation { + return s +} diff --git a/typedapi/types/braziliananalyzer.go b/typedapi/types/braziliananalyzer.go new file mode 100644 index 0000000000..307e357947 --- /dev/null +++ b/typedapi/types/braziliananalyzer.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BrazilianAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L80-L84 +type BrazilianAnalyzer struct { + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *BrazilianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s BrazilianAnalyzer) MarshalJSON() ([]byte, error) { + type innerBrazilianAnalyzer BrazilianAnalyzer + tmp := innerBrazilianAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = 
"brazilian" + + return json.Marshal(tmp) +} + +// NewBrazilianAnalyzer returns a BrazilianAnalyzer. +func NewBrazilianAnalyzer() *BrazilianAnalyzer { + r := &BrazilianAnalyzer{} + + return r +} + +// true + +type BrazilianAnalyzerVariant interface { + BrazilianAnalyzerCaster() *BrazilianAnalyzer +} + +func (s *BrazilianAnalyzer) BrazilianAnalyzerCaster() *BrazilianAnalyzer { + return s +} diff --git a/typedapi/types/breaker.go b/typedapi/types/breaker.go index a811c4aa1f..aa92e93c16 100644 --- a/typedapi/types/breaker.go +++ b/typedapi/types/breaker.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Breaker type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L434-L459 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L470-L495 type Breaker struct { // EstimatedSize Estimated memory used for the operation. EstimatedSize *string `json:"estimated_size,omitempty"` @@ -161,3 +161,5 @@ func NewBreaker() *Breaker { return r } + +// false diff --git a/typedapi/types/bucketcorrelationaggregation.go b/typedapi/types/bucketcorrelationaggregation.go index 3605c24304..dbfb8efceb 100644 --- a/typedapi/types/bucketcorrelationaggregation.go +++ b/typedapi/types/bucketcorrelationaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // BucketCorrelationAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L129-L135 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L139-L146 type BucketCorrelationAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -74,3 +74,13 @@ func NewBucketCorrelationAggregation() *BucketCorrelationAggregation { return r } + +// true + +type BucketCorrelationAggregationVariant interface { + BucketCorrelationAggregationCaster() *BucketCorrelationAggregation +} + +func (s *BucketCorrelationAggregation) BucketCorrelationAggregationCaster() *BucketCorrelationAggregation { + return s +} diff --git a/typedapi/types/bucketcorrelationfunction.go b/typedapi/types/bucketcorrelationfunction.go index 8df427dfd2..8643d300e4 100644 --- a/typedapi/types/bucketcorrelationfunction.go +++ b/typedapi/types/bucketcorrelationfunction.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // BucketCorrelationFunction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L137-L142 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L148-L153 type BucketCorrelationFunction struct { // CountCorrelation The configuration to calculate a count correlation. This function is designed // for determining the correlation of a term value and a given metric. @@ -35,3 +35,13 @@ func NewBucketCorrelationFunction() *BucketCorrelationFunction { return r } + +// true + +type BucketCorrelationFunctionVariant interface { + BucketCorrelationFunctionCaster() *BucketCorrelationFunction +} + +func (s *BucketCorrelationFunction) BucketCorrelationFunctionCaster() *BucketCorrelationFunction { + return s +} diff --git a/typedapi/types/bucketcorrelationfunctioncountcorrelation.go b/typedapi/types/bucketcorrelationfunctioncountcorrelation.go index 528bd324fa..11fc4b51db 100644 --- a/typedapi/types/bucketcorrelationfunctioncountcorrelation.go +++ b/typedapi/types/bucketcorrelationfunctioncountcorrelation.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // BucketCorrelationFunctionCountCorrelation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L144-L147 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L155-L158 type BucketCorrelationFunctionCountCorrelation struct { // Indicator The indicator with which to correlate the configured `bucket_path` values. Indicator BucketCorrelationFunctionCountCorrelationIndicator `json:"indicator"` @@ -34,3 +34,13 @@ func NewBucketCorrelationFunctionCountCorrelation() *BucketCorrelationFunctionCo return r } + +// true + +type BucketCorrelationFunctionCountCorrelationVariant interface { + BucketCorrelationFunctionCountCorrelationCaster() *BucketCorrelationFunctionCountCorrelation +} + +func (s *BucketCorrelationFunctionCountCorrelation) BucketCorrelationFunctionCountCorrelationCaster() *BucketCorrelationFunctionCountCorrelation { + return s +} diff --git a/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go b/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go index 6383b79d1e..284f9290f4 100644 --- a/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go +++ b/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // BucketCorrelationFunctionCountCorrelationIndicator type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L149-L167 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L160-L178 type BucketCorrelationFunctionCountCorrelationIndicator struct { // DocCount The total number of documents that initially created the expectations. It’s // required to be greater @@ -104,3 +104,13 @@ func NewBucketCorrelationFunctionCountCorrelationIndicator() *BucketCorrelationF return r } + +// true + +type BucketCorrelationFunctionCountCorrelationIndicatorVariant interface { + BucketCorrelationFunctionCountCorrelationIndicatorCaster() *BucketCorrelationFunctionCountCorrelationIndicator +} + +func (s *BucketCorrelationFunctionCountCorrelationIndicator) BucketCorrelationFunctionCountCorrelationIndicatorCaster() *BucketCorrelationFunctionCountCorrelationIndicator { + return s +} diff --git a/typedapi/types/bucketinfluencer.go b/typedapi/types/bucketinfluencer.go index abdc06dad0..487d43528f 100644 --- a/typedapi/types/bucketinfluencer.go +++ b/typedapi/types/bucketinfluencer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // BucketInfluencer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Bucket.ts#L80-L128 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Bucket.ts#L79-L127 type BucketInfluencer struct { // AnomalyScore A normalized score between 0-100, which is calculated for each bucket // influencer. This score might be updated as @@ -208,3 +208,5 @@ func NewBucketInfluencer() *BucketInfluencer { return r } + +// false diff --git a/typedapi/types/bucketksaggregation.go b/typedapi/types/bucketksaggregation.go index 20fc609089..972447d8e4 100644 --- a/typedapi/types/bucketksaggregation.go +++ b/typedapi/types/bucketksaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // BucketKsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L94-L127 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L103-L137 type BucketKsAggregation struct { // Alternative A list of string values indicating which K-S test alternative to calculate. 
// The valid values @@ -115,3 +115,13 @@ func NewBucketKsAggregation() *BucketKsAggregation { return r } + +// true + +type BucketKsAggregationVariant interface { + BucketKsAggregationCaster() *BucketKsAggregation +} + +func (s *BucketKsAggregation) BucketKsAggregationCaster() *BucketKsAggregation { + return s +} diff --git a/typedapi/types/bucketmetricvalueaggregate.go b/typedapi/types/bucketmetricvalueaggregate.go index 038c12dcdb..0f47628b2a 100644 --- a/typedapi/types/bucketmetricvalueaggregate.go +++ b/typedapi/types/bucketmetricvalueaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // BucketMetricValueAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L233-L236 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L250-L253 type BucketMetricValueAggregate struct { Keys []string `json:"keys"` Meta Metadata `json:"meta,omitempty"` @@ -95,3 +95,5 @@ func NewBucketMetricValueAggregate() *BucketMetricValueAggregate { return r } + +// false diff --git a/typedapi/types/bucketsadjacencymatrixbucket.go b/typedapi/types/bucketsadjacencymatrixbucket.go index 0a6f3b0cd3..6363e699c1 100644 --- a/typedapi/types/bucketsadjacencymatrixbucket.go +++ b/typedapi/types/bucketsadjacencymatrixbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]AdjacencyMatrixBucket // []AdjacencyMatrixBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsAdjacencyMatrixBucket any diff --git a/typedapi/types/bucketsapikeyquerycontainer.go b/typedapi/types/bucketsapikeyquerycontainer.go index dc37ed66db..62286dfc0b 100644 --- a/typedapi/types/bucketsapikeyquerycontainer.go +++ b/typedapi/types/bucketsapikeyquerycontainer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // map[string]ApiKeyQueryContainer // []ApiKeyQueryContainer // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsApiKeyQueryContainer any + +type BucketsApiKeyQueryContainerVariant interface { + BucketsApiKeyQueryContainerCaster() *BucketsApiKeyQueryContainer +} diff --git a/typedapi/types/bucketscompositebucket.go b/typedapi/types/bucketscompositebucket.go index 4381454ded..6220d18d19 100644 --- a/typedapi/types/bucketscompositebucket.go +++ b/typedapi/types/bucketscompositebucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]CompositeBucket // []CompositeBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsCompositeBucket any diff --git a/typedapi/types/bucketscriptaggregation.go b/typedapi/types/bucketscriptaggregation.go index 053507af0a..4d9d836908 100644 --- a/typedapi/types/bucketscriptaggregation.go +++ b/typedapi/types/bucketscriptaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // BucketScriptAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L80-L85 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L83-L91 type BucketScriptAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -100,3 +100,13 @@ func NewBucketScriptAggregation() *BucketScriptAggregation { return r } + +// true + +type BucketScriptAggregationVariant interface { + BucketScriptAggregationCaster() *BucketScriptAggregation +} + +func (s *BucketScriptAggregation) BucketScriptAggregationCaster() *BucketScriptAggregation { + return s +} diff --git a/typedapi/types/bucketsdatehistogrambucket.go b/typedapi/types/bucketsdatehistogrambucket.go index 41859b5c0d..88453a5c21 100644 --- a/typedapi/types/bucketsdatehistogrambucket.go +++ b/typedapi/types/bucketsdatehistogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]DateHistogramBucket // []DateHistogramBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsDateHistogramBucket any diff --git a/typedapi/types/bucketsdoubletermsbucket.go b/typedapi/types/bucketsdoubletermsbucket.go index 6177edf5bb..5b221fbc93 100644 --- a/typedapi/types/bucketsdoubletermsbucket.go +++ b/typedapi/types/bucketsdoubletermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]DoubleTermsBucket // []DoubleTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsDoubleTermsBucket any diff --git a/typedapi/types/bucketselectoraggregation.go b/typedapi/types/bucketselectoraggregation.go index fb2302be63..e2ef7126e8 100644 --- a/typedapi/types/bucketselectoraggregation.go +++ b/typedapi/types/bucketselectoraggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // BucketSelectorAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L87-L92 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L93-L101 type BucketSelectorAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -100,3 +100,13 @@ func NewBucketSelectorAggregation() *BucketSelectorAggregation { return r } + +// true + +type BucketSelectorAggregationVariant interface { + BucketSelectorAggregationCaster() *BucketSelectorAggregation +} + +func (s *BucketSelectorAggregation) BucketSelectorAggregationCaster() *BucketSelectorAggregation { + return s +} diff --git a/typedapi/types/bucketsfiltersbucket.go b/typedapi/types/bucketsfiltersbucket.go index 87ea834bfd..661cb2c763 100644 --- a/typedapi/types/bucketsfiltersbucket.go +++ b/typedapi/types/bucketsfiltersbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]FiltersBucket // []FiltersBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsFiltersBucket any diff --git a/typedapi/types/bucketsfrequentitemsetsbucket.go b/typedapi/types/bucketsfrequentitemsetsbucket.go index bf71fb80a4..7191738f94 100644 --- a/typedapi/types/bucketsfrequentitemsetsbucket.go +++ b/typedapi/types/bucketsfrequentitemsetsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]FrequentItemSetsBucket // []FrequentItemSetsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsFrequentItemSetsBucket any diff --git a/typedapi/types/bucketsgeohashgridbucket.go b/typedapi/types/bucketsgeohashgridbucket.go index 3353bcb75c..621fb22a37 100644 --- a/typedapi/types/bucketsgeohashgridbucket.go +++ b/typedapi/types/bucketsgeohashgridbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]GeoHashGridBucket // []GeoHashGridBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsGeoHashGridBucket any diff --git a/typedapi/types/bucketsgeohexgridbucket.go b/typedapi/types/bucketsgeohexgridbucket.go index 766d7c2a57..d668cf5add 100644 --- a/typedapi/types/bucketsgeohexgridbucket.go +++ b/typedapi/types/bucketsgeohexgridbucket.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]GeoHexGridBucket // []GeoHexGridBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsGeoHexGridBucket any diff --git a/typedapi/types/bucketsgeotilegridbucket.go b/typedapi/types/bucketsgeotilegridbucket.go index bddc8039e2..34b1e86d3f 100644 --- a/typedapi/types/bucketsgeotilegridbucket.go +++ b/typedapi/types/bucketsgeotilegridbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]GeoTileGridBucket // []GeoTileGridBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsGeoTileGridBucket any diff --git a/typedapi/types/bucketshistogrambucket.go b/typedapi/types/bucketshistogrambucket.go index dcf9ee48f4..a0d62efb78 100644 --- a/typedapi/types/bucketshistogrambucket.go +++ b/typedapi/types/bucketshistogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]HistogramBucket // []HistogramBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsHistogramBucket any diff --git a/typedapi/types/bucketsipprefixbucket.go b/typedapi/types/bucketsipprefixbucket.go index 20747fa465..c42e835323 100644 --- a/typedapi/types/bucketsipprefixbucket.go +++ b/typedapi/types/bucketsipprefixbucket.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]IpPrefixBucket // []IpPrefixBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsIpPrefixBucket any diff --git a/typedapi/types/bucketsiprangebucket.go b/typedapi/types/bucketsiprangebucket.go index 5ed091352d..7eb7002f37 100644 --- a/typedapi/types/bucketsiprangebucket.go +++ b/typedapi/types/bucketsiprangebucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]IpRangeBucket // []IpRangeBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsIpRangeBucket any diff --git a/typedapi/types/bucketslongraretermsbucket.go b/typedapi/types/bucketslongraretermsbucket.go index 56f2367f57..f371c18d14 100644 --- a/typedapi/types/bucketslongraretermsbucket.go +++ b/typedapi/types/bucketslongraretermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]LongRareTermsBucket // []LongRareTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsLongRareTermsBucket any diff --git a/typedapi/types/bucketslongtermsbucket.go b/typedapi/types/bucketslongtermsbucket.go index 1cb669bbca..26db62e859 100644 --- a/typedapi/types/bucketslongtermsbucket.go +++ b/typedapi/types/bucketslongtermsbucket.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]LongTermsBucket // []LongTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsLongTermsBucket any diff --git a/typedapi/types/bucketsmultitermsbucket.go b/typedapi/types/bucketsmultitermsbucket.go index 7153b698d5..aed1804bbf 100644 --- a/typedapi/types/bucketsmultitermsbucket.go +++ b/typedapi/types/bucketsmultitermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]MultiTermsBucket // []MultiTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsMultiTermsBucket any diff --git a/typedapi/types/bucketsortaggregation.go b/typedapi/types/bucketsortaggregation.go index 115ca7eb78..46bf0a5f8f 100644 --- a/typedapi/types/bucketsortaggregation.go +++ b/typedapi/types/bucketsortaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // BucketSortAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L169-L190 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L180-L204 type BucketSortAggregation struct { // From Buckets in positions prior to `from` will be truncated. 
From *int `json:"from,omitempty"` @@ -125,3 +125,13 @@ func NewBucketSortAggregation() *BucketSortAggregation { return r } + +// true + +type BucketSortAggregationVariant interface { + BucketSortAggregationCaster() *BucketSortAggregation +} + +func (s *BucketSortAggregation) BucketSortAggregationCaster() *BucketSortAggregation { + return s +} diff --git a/typedapi/types/bucketspath.go b/typedapi/types/bucketspath.go index c977d21419..b84f5e8aff 100644 --- a/typedapi/types/bucketspath.go +++ b/typedapi/types/bucketspath.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,9 @@ package types // []string // map[string]string // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L53-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L53-L59 type BucketsPath any + +type BucketsPathVariant interface { + BucketsPathCaster() *BucketsPath +} diff --git a/typedapi/types/bucketsquery.go b/typedapi/types/bucketsquery.go index a2c78a7759..3612763d78 100644 --- a/typedapi/types/bucketsquery.go +++ b/typedapi/types/bucketsquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // map[string]Query // []Query // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsQuery any + +type BucketsQueryVariant interface { + BucketsQueryCaster() *BucketsQuery +} diff --git a/typedapi/types/bucketsrangebucket.go b/typedapi/types/bucketsrangebucket.go index 15c237ae19..e6c0f82824 100644 --- a/typedapi/types/bucketsrangebucket.go +++ b/typedapi/types/bucketsrangebucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]RangeBucket // []RangeBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsRangeBucket any diff --git a/typedapi/types/bucketssignificantlongtermsbucket.go b/typedapi/types/bucketssignificantlongtermsbucket.go index e71897f69a..fe3ba7a129 100644 --- a/typedapi/types/bucketssignificantlongtermsbucket.go +++ b/typedapi/types/bucketssignificantlongtermsbucket.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]SignificantLongTermsBucket // []SignificantLongTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsSignificantLongTermsBucket any diff --git a/typedapi/types/bucketssignificantstringtermsbucket.go b/typedapi/types/bucketssignificantstringtermsbucket.go index 8a25ec0ddf..8ec2a16142 100644 --- a/typedapi/types/bucketssignificantstringtermsbucket.go +++ b/typedapi/types/bucketssignificantstringtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]SignificantStringTermsBucket // []SignificantStringTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsSignificantStringTermsBucket any diff --git a/typedapi/types/bucketsstringraretermsbucket.go b/typedapi/types/bucketsstringraretermsbucket.go index 2f4e3b35f6..4de79cad36 100644 --- a/typedapi/types/bucketsstringraretermsbucket.go +++ b/typedapi/types/bucketsstringraretermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]StringRareTermsBucket // []StringRareTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsStringRareTermsBucket any diff --git a/typedapi/types/bucketsstringtermsbucket.go b/typedapi/types/bucketsstringtermsbucket.go index 14817d23f9..7df9da3eeb 100644 --- a/typedapi/types/bucketsstringtermsbucket.go +++ b/typedapi/types/bucketsstringtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]StringTermsBucket // []StringTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsStringTermsBucket any diff --git a/typedapi/types/bucketstimeseriesbucket.go b/typedapi/types/bucketstimeseriesbucket.go new file mode 100644 index 0000000000..1dd3ec433b --- /dev/null +++ b/typedapi/types/bucketstimeseriesbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +// BucketsTimeSeriesBucket holds the union for the following types: +// +// map[string]TimeSeriesBucket +// []TimeSeriesBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsTimeSeriesBucket any diff --git a/typedapi/types/bucketsummary.go b/typedapi/types/bucketsummary.go index 76527d3dee..0edb10f3fb 100644 --- a/typedapi/types/bucketsummary.go +++ b/typedapi/types/bucketsummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // BucketSummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Bucket.ts#L31-L78 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Bucket.ts#L30-L77 type BucketSummary struct { // AnomalyScore The maximum anomaly score, between 0-100, for any of the bucket influencers. // This is an overall, rate-limited @@ -198,3 +198,5 @@ func NewBucketSummary() *BucketSummary { return r } + +// false diff --git a/typedapi/types/bucketsvariablewidthhistogrambucket.go b/typedapi/types/bucketsvariablewidthhistogrambucket.go index 1e07c69d8a..5dbbf50c2b 100644 --- a/typedapi/types/bucketsvariablewidthhistogrambucket.go +++ b/typedapi/types/bucketsvariablewidthhistogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]VariableWidthHistogramBucket // []VariableWidthHistogramBucket // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsVariableWidthHistogramBucket any diff --git a/typedapi/types/bucketsvoid.go b/typedapi/types/bucketsvoid.go index 6137beffcc..d887cbcb6b 100644 --- a/typedapi/types/bucketsvoid.go +++ b/typedapi/types/bucketsvoid.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // map[string]any // []any // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L316-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L346-L355 type BucketsVoid any diff --git a/typedapi/types/buildinformation.go b/typedapi/types/buildinformation.go index 8b5ee21358..5c2aa1968e 100644 --- a/typedapi/types/buildinformation.go +++ b/typedapi/types/buildinformation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // BuildInformation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/info/types.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/info/types.ts#L24-L27 type BuildInformation struct { Date DateTime `json:"date"` Hash string `json:"hash"` @@ -80,3 +80,5 @@ func NewBuildInformation() *BuildInformation { return r } + +// false diff --git a/typedapi/types/bulgariananalyzer.go b/typedapi/types/bulgariananalyzer.go new file mode 100644 index 0000000000..8bbc06f914 --- /dev/null +++ b/typedapi/types/bulgariananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BulgarianAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L86-L91 +type BulgarianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *BulgarianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s BulgarianAnalyzer) MarshalJSON() ([]byte, error) { + type innerBulgarianAnalyzer BulgarianAnalyzer + tmp := innerBulgarianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = 
"bulgarian" + + return json.Marshal(tmp) +} + +// NewBulgarianAnalyzer returns a BulgarianAnalyzer. +func NewBulgarianAnalyzer() *BulgarianAnalyzer { + r := &BulgarianAnalyzer{} + + return r +} + +// true + +type BulgarianAnalyzerVariant interface { + BulgarianAnalyzerCaster() *BulgarianAnalyzer +} + +func (s *BulgarianAnalyzer) BulgarianAnalyzerCaster() *BulgarianAnalyzer { + return s +} diff --git a/typedapi/types/bulkerror.go b/typedapi/types/bulkerror.go index 4d8e26caa9..0b28bb9e78 100644 --- a/typedapi/types/bulkerror.go +++ b/typedapi/types/bulkerror.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // BulkError type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/Bulk.ts#L24-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Bulk.ts#L24-L33 type BulkError struct { // Count The number of errors Count int `json:"count"` @@ -86,8 +86,10 @@ func (s *BulkError) UnmarshalJSON(data []byte) error { // NewBulkError returns a BulkError. func NewBulkError() *BulkError { r := &BulkError{ - Details: make(map[string]ErrorCause, 0), + Details: make(map[string]ErrorCause), } return r } + +// false diff --git a/typedapi/types/bulkindexbyscrollfailure.go b/typedapi/types/bulkindexbyscrollfailure.go index 97e915c446..6c3ba07544 100644 --- a/typedapi/types/bulkindexbyscrollfailure.go +++ b/typedapi/types/bulkindexbyscrollfailure.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,13 +31,12 @@ import ( // BulkIndexByScrollFailure type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Errors.ts#L60-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Errors.ts#L60-L65 type BulkIndexByScrollFailure struct { Cause ErrorCause `json:"cause"` Id string `json:"id"` Index string `json:"index"` Status int `json:"status"` - Type string `json:"type"` } func (s *BulkIndexByScrollFailure) UnmarshalJSON(data []byte) error { @@ -86,18 +85,6 @@ func (s *BulkIndexByScrollFailure) UnmarshalJSON(data []byte) error { s.Status = f } - case "type": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Type", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Type = o - } } return nil @@ -109,3 +96,5 @@ func NewBulkIndexByScrollFailure() *BulkIndexByScrollFailure { return r } + +// false diff --git a/typedapi/types/bulkstats.go b/typedapi/types/bulkstats.go index bfcd679409..2a5c497c06 100644 --- a/typedapi/types/bulkstats.go +++ b/typedapi/types/bulkstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // BulkStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L68-L78 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L71-L81 type BulkStats struct { AvgSize ByteSize `json:"avg_size,omitempty"` AvgSizeInBytes int64 `json:"avg_size_in_bytes"` @@ -145,3 +145,5 @@ func NewBulkStats() *BulkStats { return r } + +// false diff --git a/typedapi/types/bytenumberproperty.go b/typedapi/types/bytenumberproperty.go index 8b46b502cd..254c898eaf 100644 --- a/typedapi/types/bytenumberproperty.go +++ b/typedapi/types/bytenumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // ByteNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L172-L175 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L176-L179 type ByteNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,13 +48,13 @@ type ByteNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *byte `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *byte `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -162,301 +163,313 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -543,301 +556,313 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -848,18 +873,6 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Script", err) } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -874,6 +887,11 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -921,8 +939,8 @@ func (s ByteNumberProperty) MarshalJSON() ([]byte, error) { OnScriptError: s.OnScriptError, Properties: s.Properties, Script: s.Script, - Similarity: s.Similarity, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -936,10 +954,20 @@ func (s ByteNumberProperty) MarshalJSON() ([]byte, error) { // NewByteNumberProperty returns a ByteNumberProperty. 
func NewByteNumberProperty() *ByteNumberProperty { r := &ByteNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type ByteNumberPropertyVariant interface { + ByteNumberPropertyCaster() *ByteNumberProperty +} + +func (s *ByteNumberProperty) ByteNumberPropertyCaster() *ByteNumberProperty { + return s +} diff --git a/typedapi/types/bytesize.go b/typedapi/types/bytesize.go index 75877c9fd7..c7f665cf6f 100644 --- a/typedapi/types/bytesize.go +++ b/typedapi/types/bytesize.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // int64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L96-L97 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L97-L98 type ByteSize any + +type ByteSizeVariant interface { + ByteSizeCaster() *ByteSize +} diff --git a/typedapi/types/bytesprocessor.go b/typedapi/types/bytesprocessor.go index f8dc5203f5..47533c739a 100644 --- a/typedapi/types/bytesprocessor.go +++ b/typedapi/types/bytesprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // BytesProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L392-L408 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L555-L571 type BytesProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -160,3 +160,13 @@ func NewBytesProcessor() *BytesProcessor { return r } + +// true + +type BytesProcessorVariant interface { + BytesProcessorCaster() *BytesProcessor +} + +func (s *BytesProcessor) BytesProcessorCaster() *BytesProcessor { + return s +} diff --git a/typedapi/types/cachequeries.go b/typedapi/types/cachequeries.go index e8324ea7b1..2758273b52 100644 --- a/typedapi/types/cachequeries.go +++ b/typedapi/types/cachequeries.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CacheQueries type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L407-L409 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L421-L423 type CacheQueries struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,13 @@ func NewCacheQueries() *CacheQueries { return r } + +// true + +type CacheQueriesVariant interface { + CacheQueriesCaster() *CacheQueries +} + +func (s *CacheQueries) CacheQueriesCaster() *CacheQueries { + return s +} diff --git a/typedapi/types/cachestats.go b/typedapi/types/cachestats.go index a88422fe8b..9d2533aadb 100644 --- a/typedapi/types/cachestats.go +++ b/typedapi/types/cachestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,13 +31,16 @@ import ( // CacheStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/stats/types.ts#L37-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/stats/types.ts#L38-L50 type CacheStats struct { - Count int `json:"count"` - Evictions int `json:"evictions"` - Hits int `json:"hits"` - Misses int `json:"misses"` - NodeId string `json:"node_id"` + Count int `json:"count"` + Evictions int `json:"evictions"` + Hits int `json:"hits"` + HitsTimeInMillis int64 `json:"hits_time_in_millis"` + Misses int `json:"misses"` + MissesTimeInMillis int64 `json:"misses_time_in_millis"` + NodeId string `json:"node_id"` + SizeInBytes int64 `json:"size_in_bytes"` } func (s *CacheStats) UnmarshalJSON(data []byte) error { @@ -103,6 +106,11 @@ func (s *CacheStats) UnmarshalJSON(data []byte) error { s.Hits = f } + case "hits_time_in_millis": + if err := dec.Decode(&s.HitsTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "HitsTimeInMillis", err) + } + case "misses": var tmp any @@ -119,11 +127,31 @@ func (s *CacheStats) UnmarshalJSON(data []byte) error { s.Misses = f } + case "misses_time_in_millis": + if err := dec.Decode(&s.MissesTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "MissesTimeInMillis", err) + } + case "node_id": if err := dec.Decode(&s.NodeId); err != nil { return fmt.Errorf("%s | %w", "NodeId", err) } + case "size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SizeInBytes", err) + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + } } return nil @@ -135,3 +163,5 @@ func NewCacheStats() *CacheStats { return r } + +// false diff --git a/typedapi/types/calendar.go b/typedapi/types/calendar.go index 6808b97379..0e16f44963 100644 --- a/typedapi/types/calendar.go +++ 
b/typedapi/types/calendar.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Calendar type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_calendars/types.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_calendars/types.ts#L22-L29 type Calendar struct { // CalendarId A string that uniquely identifies a calendar. CalendarId string `json:"calendar_id"` @@ -89,3 +89,5 @@ func NewCalendar() *Calendar { return r } + +// false diff --git a/typedapi/types/calendarevent.go b/typedapi/types/calendarevent.go index 3ddf75961e..9ebe8f72db 100644 --- a/typedapi/types/calendarevent.go +++ b/typedapi/types/calendarevent.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CalendarEvent type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/CalendarEvent.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/CalendarEvent.ts#L23-L33 type CalendarEvent struct { // CalendarId A string that uniquely identifies a calendar. 
CalendarId *string `json:"calendar_id,omitempty"` @@ -104,3 +104,13 @@ func NewCalendarEvent() *CalendarEvent { return r } + +// true + +type CalendarEventVariant interface { + CalendarEventCaster() *CalendarEvent +} + +func (s *CalendarEvent) CalendarEventCaster() *CalendarEvent { + return s +} diff --git a/typedapi/types/cardinalityaggregate.go b/typedapi/types/cardinalityaggregate.go index c9fa0387a5..3512e97f4c 100644 --- a/typedapi/types/cardinalityaggregate.go +++ b/typedapi/types/cardinalityaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CardinalityAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L138-L141 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L140-L143 type CardinalityAggregate struct { Meta Metadata `json:"meta,omitempty"` Value int64 `json:"value"` @@ -83,3 +83,5 @@ func NewCardinalityAggregate() *CardinalityAggregate { return r } + +// false diff --git a/typedapi/types/cardinalityaggregation.go b/typedapi/types/cardinalityaggregation.go index a48e3dc8d6..9f4c709f95 100644 --- a/typedapi/types/cardinalityaggregation.go +++ b/typedapi/types/cardinalityaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // CardinalityAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L87-L99 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L87-L99 type CardinalityAggregation struct { // ExecutionHint Mechanism by which cardinality aggregations is run. ExecutionHint *cardinalityexecutionmode.CardinalityExecutionMode `json:"execution_hint,omitempty"` @@ -125,3 +125,13 @@ func NewCardinalityAggregation() *CardinalityAggregation { return r } + +// true + +type CardinalityAggregationVariant interface { + CardinalityAggregationCaster() *CardinalityAggregation +} + +func (s *CardinalityAggregation) CardinalityAggregationCaster() *CardinalityAggregation { + return s +} diff --git a/typedapi/types/catalananalyzer.go b/typedapi/types/catalananalyzer.go new file mode 100644 index 0000000000..8d1e6e6b62 --- /dev/null +++ b/typedapi/types/catalananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CatalanAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L93-L98 +type CatalanAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *CatalanAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case 
"stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CatalanAnalyzer) MarshalJSON() ([]byte, error) { + type innerCatalanAnalyzer CatalanAnalyzer + tmp := innerCatalanAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "catalan" + + return json.Marshal(tmp) +} + +// NewCatalanAnalyzer returns a CatalanAnalyzer. +func NewCatalanAnalyzer() *CatalanAnalyzer { + r := &CatalanAnalyzer{} + + return r +} + +// true + +type CatalanAnalyzerVariant interface { + CatalanAnalyzerCaster() *CatalanAnalyzer +} + +func (s *CatalanAnalyzer) CatalanAnalyzerCaster() *CatalanAnalyzer { + return s +} diff --git a/typedapi/types/catanonalydetectorcolumns.go b/typedapi/types/catanonalydetectorcolumns.go index 6f4bc1e44a..3f928acddb 100644 --- a/typedapi/types/catanonalydetectorcolumns.go +++ b/typedapi/types/catanonalydetectorcolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,5 @@ import ( // CatAnonalyDetectorColumns type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/_types/CatBase.ts#L402-L404 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/_types/CatBase.ts#L402-L404 type CatAnonalyDetectorColumns []catanomalydetectorcolumn.CatAnomalyDetectorColumn diff --git a/typedapi/types/catcomponenttemplate.go b/typedapi/types/catcomponenttemplate.go index 060091f977..c0faa6ba53 100644 --- a/typedapi/types/catcomponenttemplate.go +++ b/typedapi/types/catcomponenttemplate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,15 +31,15 @@ import ( // CatComponentTemplate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/component_templates/types.ts#L20-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/component_templates/types.ts#L20-L28 type CatComponentTemplate struct { - AliasCount string `json:"alias_count"` - IncludedIn string `json:"included_in"` - MappingCount string `json:"mapping_count"` - MetadataCount string `json:"metadata_count"` - Name string `json:"name"` - SettingsCount string `json:"settings_count"` - Version string `json:"version"` + AliasCount string `json:"alias_count"` + IncludedIn string `json:"included_in"` + MappingCount string `json:"mapping_count"` + MetadataCount string `json:"metadata_count"` + Name string `json:"name"` + SettingsCount string `json:"settings_count"` + Version *string `json:"version,omitempty"` } func (s *CatComponentTemplate) UnmarshalJSON(data []byte) error 
{ @@ -139,7 +139,7 @@ func (s *CatComponentTemplate) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Version = o + s.Version = &o } } @@ -152,3 +152,5 @@ func NewCatComponentTemplate() *CatComponentTemplate { return r } + +// false diff --git a/typedapi/types/catdatafeedcolumns.go b/typedapi/types/catdatafeedcolumns.go index 807f8707c3..2366e3cae3 100644 --- a/typedapi/types/catdatafeedcolumns.go +++ b/typedapi/types/catdatafeedcolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,5 @@ import ( // CatDatafeedColumns type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/_types/CatBase.ts#L559-L559 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/_types/CatBase.ts#L559-L559 type CatDatafeedColumns []catdatafeedcolumn.CatDatafeedColumn diff --git a/typedapi/types/catdfacolumns.go b/typedapi/types/catdfacolumns.go index 7dd34ae262..130880e306 100644 --- a/typedapi/types/catdfacolumns.go +++ b/typedapi/types/catdfacolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,5 @@ import ( // CatDfaColumns type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/_types/CatBase.ts#L558-L558 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/_types/CatBase.ts#L558-L558 type CatDfaColumns []catdfacolumn.CatDfaColumn diff --git a/typedapi/types/categorizationanalyzer.go b/typedapi/types/categorizationanalyzer.go index 64665fc9cc..ebe442eff4 100644 --- a/typedapi/types/categorizationanalyzer.go +++ b/typedapi/types/categorizationanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // CategorizationAnalyzerDefinition // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Analysis.ts#L181-L182 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Analysis.ts#L181-L182 type CategorizationAnalyzer any + +type CategorizationAnalyzerVariant interface { + CategorizationAnalyzerCaster() *CategorizationAnalyzer +} diff --git a/typedapi/types/categorizationanalyzerdefinition.go b/typedapi/types/categorizationanalyzerdefinition.go index 0a5ff9c998..ab727dcdc4 100644 --- a/typedapi/types/categorizationanalyzerdefinition.go +++ b/typedapi/types/categorizationanalyzerdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -24,12 +24,13 @@ import ( "bytes" "encoding/json" "errors" + "fmt" "io" ) // CategorizationAnalyzerDefinition type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Analysis.ts#L184-L197 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Analysis.ts#L184-L198 type CategorizationAnalyzerDefinition struct { // CharFilter One or more character filters. In addition to the built-in character filters, // other plugins can provide more character filters. If this property is not @@ -91,37 +92,37 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { case "html_strip": o := NewHtmlStripCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "html_strip", err) } s.CharFilter = append(s.CharFilter, *o) case "mapping": o := NewMappingCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "mapping", err) } s.CharFilter = append(s.CharFilter, *o) case "pattern_replace": o := NewPatternReplaceCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern_replace", err) } s.CharFilter = append(s.CharFilter, *o) case "icu_normalizer": o := NewIcuNormalizationCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_normalizer", err) } s.CharFilter = append(s.CharFilter, *o) case "kuromoji_iteration_mark": o := NewKuromojiIterationMarkCharFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_iteration_mark", err) } s.CharFilter = 
append(s.CharFilter, *o) default: o := new(any) if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("CharFilter | %w", err) } s.CharFilter = append(s.CharFilter, *o) } @@ -144,289 +145,289 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { case "asciifolding": o := NewAsciiFoldingTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "asciifolding", err) } s.Filter = append(s.Filter, *o) case "common_grams": o := NewCommonGramsTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "common_grams", err) } s.Filter = append(s.Filter, *o) case "condition": o := NewConditionTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "condition", err) } s.Filter = append(s.Filter, *o) case "delimited_payload": o := NewDelimitedPayloadTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "delimited_payload", err) } s.Filter = append(s.Filter, *o) case "edge_ngram": o := NewEdgeNGramTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "edge_ngram", err) } s.Filter = append(s.Filter, *o) case "elision": o := NewElisionTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "elision", err) } s.Filter = append(s.Filter, *o) case "fingerprint": o := NewFingerprintTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "fingerprint", err) } s.Filter = append(s.Filter, *o) case "hunspell": o := NewHunspellTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "hunspell", err) } s.Filter = append(s.Filter, *o) case "hyphenation_decompounder": o := NewHyphenationDecompounderTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return 
fmt.Errorf("%s | %w", "hyphenation_decompounder", err) } s.Filter = append(s.Filter, *o) case "keep_types": o := NewKeepTypesTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keep_types", err) } s.Filter = append(s.Filter, *o) case "keep": o := NewKeepWordsTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keep", err) } s.Filter = append(s.Filter, *o) case "keyword_marker": o := NewKeywordMarkerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keyword_marker", err) } s.Filter = append(s.Filter, *o) case "kstem": o := NewKStemTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kstem", err) } s.Filter = append(s.Filter, *o) case "length": o := NewLengthTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "length", err) } s.Filter = append(s.Filter, *o) case "limit": o := NewLimitTokenCountTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "limit", err) } s.Filter = append(s.Filter, *o) case "lowercase": o := NewLowercaseTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "lowercase", err) } s.Filter = append(s.Filter, *o) case "multiplexer": o := NewMultiplexerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "multiplexer", err) } s.Filter = append(s.Filter, *o) case "ngram": o := NewNGramTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ngram", err) } s.Filter = append(s.Filter, *o) case "nori_part_of_speech": o := NewNoriPartOfSpeechTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "nori_part_of_speech", err) } s.Filter = append(s.Filter, *o) case 
"pattern_capture": o := NewPatternCaptureTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern_capture", err) } s.Filter = append(s.Filter, *o) case "pattern_replace": o := NewPatternReplaceTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern_replace", err) } s.Filter = append(s.Filter, *o) case "porter_stem": o := NewPorterStemTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "porter_stem", err) } s.Filter = append(s.Filter, *o) case "predicate_token_filter": o := NewPredicateTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "predicate_token_filter", err) } s.Filter = append(s.Filter, *o) case "remove_duplicates": o := NewRemoveDuplicatesTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "remove_duplicates", err) } s.Filter = append(s.Filter, *o) case "reverse": o := NewReverseTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "reverse", err) } s.Filter = append(s.Filter, *o) case "shingle": o := NewShingleTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "shingle", err) } s.Filter = append(s.Filter, *o) case "snowball": o := NewSnowballTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "snowball", err) } s.Filter = append(s.Filter, *o) case "stemmer_override": o := NewStemmerOverrideTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "stemmer_override", err) } s.Filter = append(s.Filter, *o) case "stemmer": o := NewStemmerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "stemmer", err) } s.Filter = append(s.Filter, *o) case "stop": o := 
NewStopTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "stop", err) } s.Filter = append(s.Filter, *o) case "synonym_graph": o := NewSynonymGraphTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "synonym_graph", err) } s.Filter = append(s.Filter, *o) case "synonym": o := NewSynonymTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "synonym", err) } s.Filter = append(s.Filter, *o) case "trim": o := NewTrimTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "trim", err) } s.Filter = append(s.Filter, *o) case "truncate": o := NewTruncateTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "truncate", err) } s.Filter = append(s.Filter, *o) case "unique": o := NewUniqueTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "unique", err) } s.Filter = append(s.Filter, *o) case "uppercase": o := NewUppercaseTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "uppercase", err) } s.Filter = append(s.Filter, *o) case "word_delimiter_graph": o := NewWordDelimiterGraphTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "word_delimiter_graph", err) } s.Filter = append(s.Filter, *o) case "word_delimiter": o := NewWordDelimiterTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "word_delimiter", err) } s.Filter = append(s.Filter, *o) case "kuromoji_stemmer": o := NewKuromojiStemmerTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_stemmer", err) } s.Filter = append(s.Filter, *o) case "kuromoji_readingform": o := NewKuromojiReadingFormTokenFilter() if err := 
localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_readingform", err) } s.Filter = append(s.Filter, *o) case "kuromoji_part_of_speech": o := NewKuromojiPartOfSpeechTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_part_of_speech", err) } s.Filter = append(s.Filter, *o) case "icu_collation": o := NewIcuCollationTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_collation", err) } s.Filter = append(s.Filter, *o) case "icu_folding": o := NewIcuFoldingTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_folding", err) } s.Filter = append(s.Filter, *o) case "icu_normalizer": o := NewIcuNormalizationTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_normalizer", err) } s.Filter = append(s.Filter, *o) case "icu_transform": o := NewIcuTransformTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_transform", err) } s.Filter = append(s.Filter, *o) case "phonetic": o := NewPhoneticTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "phonetic", err) } s.Filter = append(s.Filter, *o) case "dictionary_decompounder": o := NewDictionaryDecompounderTokenFilter() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "dictionary_decompounder", err) } s.Filter = append(s.Filter, *o) default: o := new(any) if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter = append(s.Filter, *o) } @@ -447,90 +448,114 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { case "char_group": o := NewCharGroupTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "char_group", err) + 
} + s.Tokenizer = *o + case "classic": + o := NewClassicTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "classic", err) } s.Tokenizer = *o case "edge_ngram": o := NewEdgeNGramTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "edge_ngram", err) } s.Tokenizer = *o case "keyword": o := NewKeywordTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keyword", err) } s.Tokenizer = *o case "letter": o := NewLetterTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "letter", err) } s.Tokenizer = *o case "lowercase": o := NewLowercaseTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "lowercase", err) } s.Tokenizer = *o case "ngram": o := NewNGramTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ngram", err) } s.Tokenizer = *o - case "nori_tokenizer": - o := NewNoriTokenizer() + case "path_hierarchy": + o := NewPathHierarchyTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "path_hierarchy", err) } s.Tokenizer = *o - case "path_hierarchy": - o := NewPathHierarchyTokenizer() + case "pattern": + o := NewPatternTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "pattern", err) + } + s.Tokenizer = *o + case "simple_pattern": + o := NewSimplePatternTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "simple_pattern", err) + } + s.Tokenizer = *o + case "simple_pattern_split": + o := NewSimplePatternSplitTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "simple_pattern_split", err) } s.Tokenizer = *o case "standard": o := NewStandardTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | 
%w", "standard", err) + } + s.Tokenizer = *o + case "thai": + o := NewThaiTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "thai", err) } s.Tokenizer = *o case "uax_url_email": o := NewUaxEmailUrlTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "uax_url_email", err) } s.Tokenizer = *o case "whitespace": o := NewWhitespaceTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "whitespace", err) } s.Tokenizer = *o - case "kuromoji_tokenizer": - o := NewKuromojiTokenizer() + case "icu_tokenizer": + o := NewIcuTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_tokenizer", err) } s.Tokenizer = *o - case "pattern": - o := NewPatternTokenizer() + case "kuromoji_tokenizer": + o := NewKuromojiTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "kuromoji_tokenizer", err) } s.Tokenizer = *o - case "icu_tokenizer": - o := NewIcuTokenizer() + case "nori_tokenizer": + o := NewNoriTokenizer() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "nori_tokenizer", err) } s.Tokenizer = *o default: if err := localDec.Decode(&s.Tokenizer); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } } @@ -545,3 +570,13 @@ func NewCategorizationAnalyzerDefinition() *CategorizationAnalyzerDefinition { return r } + +// true + +type CategorizationAnalyzerDefinitionVariant interface { + CategorizationAnalyzerDefinitionCaster() *CategorizationAnalyzerDefinition +} + +func (s *CategorizationAnalyzerDefinition) CategorizationAnalyzerDefinitionCaster() *CategorizationAnalyzerDefinition { + return s +} diff --git a/typedapi/types/categorizetextaggregation.go b/typedapi/types/categorizetextaggregation.go index 1a2f4299b3..e2c8005c7c 100644 --- a/typedapi/types/categorizetextaggregation.go +++ 
b/typedapi/types/categorizetextaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CategorizeTextAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L1045-L1109 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1117-L1182 type CategorizeTextAggregation struct { // CategorizationAnalyzer The categorization analyzer specifies how the text is analyzed and tokenized // before being categorized. @@ -271,3 +271,13 @@ func NewCategorizeTextAggregation() *CategorizeTextAggregation { return r } + +// true + +type CategorizeTextAggregationVariant interface { + CategorizeTextAggregationCaster() *CategorizeTextAggregation +} + +func (s *CategorizeTextAggregation) CategorizeTextAggregationCaster() *CategorizeTextAggregation { + return s +} diff --git a/typedapi/types/categorizetextanalyzer.go b/typedapi/types/categorizetextanalyzer.go index 1d55034c99..5212b70038 100644 --- a/typedapi/types/categorizetextanalyzer.go +++ b/typedapi/types/categorizetextanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // CustomCategorizeTextAnalyzer // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L1111-L1114 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1184-L1187 type CategorizeTextAnalyzer any + +type CategorizeTextAnalyzerVariant interface { + CategorizeTextAnalyzerCaster() *CategorizeTextAnalyzer +} diff --git a/typedapi/types/category.go b/typedapi/types/category.go index 6aa69b04ab..7eefca8e85 100644 --- a/typedapi/types/category.go +++ b/typedapi/types/category.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Category type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Category.ts#L23-L49 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Category.ts#L23-L49 type Category struct { // CategoryId A unique identifier for the category. category_id is unique at the job level, // even when per-partition categorization is enabled. 
@@ -103,16 +103,9 @@ func (s *Category) UnmarshalJSON(data []byte) error { } case "grok_pattern": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.GrokPattern); err != nil { return fmt.Errorf("%s | %w", "GrokPattern", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.GrokPattern = &o case "job_id": if err := dec.Decode(&s.JobId); err != nil { @@ -239,3 +232,5 @@ func NewCategory() *Category { return r } + +// false diff --git a/typedapi/types/cattrainedmodelscolumns.go b/typedapi/types/cattrainedmodelscolumns.go index 269374f417..187993e6f2 100644 --- a/typedapi/types/cattrainedmodelscolumns.go +++ b/typedapi/types/cattrainedmodelscolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,5 @@ import ( // CatTrainedModelsColumns type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/_types/CatBase.ts#L636-L638 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/_types/CatBase.ts#L636-L638 type CatTrainedModelsColumns []cattrainedmodelscolumn.CatTrainedModelsColumn diff --git a/typedapi/types/cattransformcolumns.go b/typedapi/types/cattransformcolumns.go index 1b021ec2ba..0dd3cb2724 100644 --- a/typedapi/types/cattransformcolumns.go +++ b/typedapi/types/cattransformcolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,5 @@ import ( // CatTransformColumns type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/_types/CatBase.ts#L845-L845 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/_types/CatBase.ts#L845-L845 type CatTransformColumns []cattransformcolumn.CatTransformColumn diff --git a/typedapi/types/ccr.go b/typedapi/types/ccr.go index 2da846d135..437fe49771 100644 --- a/typedapi/types/ccr.go +++ b/typedapi/types/ccr.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Ccr type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L334-L337 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L344-L347 type Ccr struct { AutoFollowPatternsCount int `json:"auto_follow_patterns_count"` Available bool `json:"available"` @@ -125,3 +125,5 @@ func NewCcr() *Ccr { return r } + +// false diff --git a/typedapi/types/ccrshardstats.go b/typedapi/types/ccrshardstats.go index 93747a7e80..590585b9b2 100644 --- a/typedapi/types/ccrshardstats.go +++ b/typedapi/types/ccrshardstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,41 +31,79 @@ import ( // CcrShardStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/_types/FollowIndexStats.ts#L35-L69 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/_types/FollowIndexStats.ts#L37-L109 type CcrShardStats struct { - BytesRead int64 `json:"bytes_read"` - FailedReadRequests int64 `json:"failed_read_requests"` - FailedWriteRequests int64 `json:"failed_write_requests"` - FatalException *ErrorCause `json:"fatal_exception,omitempty"` - FollowerAliasesVersion int64 `json:"follower_aliases_version"` - FollowerGlobalCheckpoint int64 `json:"follower_global_checkpoint"` - FollowerIndex string `json:"follower_index"` - FollowerMappingVersion int64 `json:"follower_mapping_version"` - FollowerMaxSeqNo int64 `json:"follower_max_seq_no"` - FollowerSettingsVersion int64 `json:"follower_settings_version"` - LastRequestedSeqNo int64 `json:"last_requested_seq_no"` - LeaderGlobalCheckpoint int64 `json:"leader_global_checkpoint"` - LeaderIndex string `json:"leader_index"` - LeaderMaxSeqNo int64 `json:"leader_max_seq_no"` - OperationsRead int64 `json:"operations_read"` - OperationsWritten int64 `json:"operations_written"` - OutstandingReadRequests int `json:"outstanding_read_requests"` - OutstandingWriteRequests int `json:"outstanding_write_requests"` - ReadExceptions []ReadException `json:"read_exceptions"` - RemoteCluster string `json:"remote_cluster"` - ShardId int `json:"shard_id"` - SuccessfulReadRequests int64 `json:"successful_read_requests"` - SuccessfulWriteRequests int64 `json:"successful_write_requests"` - TimeSinceLastRead Duration `json:"time_since_last_read,omitempty"` - 
TimeSinceLastReadMillis int64 `json:"time_since_last_read_millis"` - TotalReadRemoteExecTime Duration `json:"total_read_remote_exec_time,omitempty"` - TotalReadRemoteExecTimeMillis int64 `json:"total_read_remote_exec_time_millis"` - TotalReadTime Duration `json:"total_read_time,omitempty"` - TotalReadTimeMillis int64 `json:"total_read_time_millis"` - TotalWriteTime Duration `json:"total_write_time,omitempty"` - TotalWriteTimeMillis int64 `json:"total_write_time_millis"` - WriteBufferOperationCount int64 `json:"write_buffer_operation_count"` - WriteBufferSizeInBytes ByteSize `json:"write_buffer_size_in_bytes"` + // BytesRead The total of transferred bytes read from the leader. + // This is only an estimate and does not account for compression if enabled. + BytesRead int64 `json:"bytes_read"` + // FailedReadRequests The number of failed reads. + FailedReadRequests int64 `json:"failed_read_requests"` + // FailedWriteRequests The number of failed bulk write requests on the follower. + FailedWriteRequests int64 `json:"failed_write_requests"` + FatalException *ErrorCause `json:"fatal_exception,omitempty"` + // FollowerAliasesVersion The index aliases version the follower is synced up to. + FollowerAliasesVersion int64 `json:"follower_aliases_version"` + // FollowerGlobalCheckpoint The current global checkpoint on the follower. + // The difference between the `leader_global_checkpoint` and the + // `follower_global_checkpoint` is an indication of how much the follower is + // lagging the leader. + FollowerGlobalCheckpoint int64 `json:"follower_global_checkpoint"` + // FollowerIndex The name of the follower index. + FollowerIndex string `json:"follower_index"` + // FollowerMappingVersion The mapping version the follower is synced up to. + FollowerMappingVersion int64 `json:"follower_mapping_version"` + // FollowerMaxSeqNo The current maximum sequence number on the follower. 
+ FollowerMaxSeqNo int64 `json:"follower_max_seq_no"` + // FollowerSettingsVersion The index settings version the follower is synced up to. + FollowerSettingsVersion int64 `json:"follower_settings_version"` + // LastRequestedSeqNo The starting sequence number of the last batch of operations requested from + // the leader. + LastRequestedSeqNo int64 `json:"last_requested_seq_no"` + // LeaderGlobalCheckpoint The current global checkpoint on the leader known to the follower task. + LeaderGlobalCheckpoint int64 `json:"leader_global_checkpoint"` + // LeaderIndex The name of the index in the leader cluster being followed. + LeaderIndex string `json:"leader_index"` + // LeaderMaxSeqNo The current maximum sequence number on the leader known to the follower task. + LeaderMaxSeqNo int64 `json:"leader_max_seq_no"` + // OperationsRead The total number of operations read from the leader. + OperationsRead int64 `json:"operations_read"` + // OperationsWritten The number of operations written on the follower. + OperationsWritten int64 `json:"operations_written"` + // OutstandingReadRequests The number of active read requests from the follower. + OutstandingReadRequests int `json:"outstanding_read_requests"` + // OutstandingWriteRequests The number of active bulk write requests on the follower. + OutstandingWriteRequests int `json:"outstanding_write_requests"` + // ReadExceptions An array of objects representing failed reads. + ReadExceptions []ReadException `json:"read_exceptions"` + // RemoteCluster The remote cluster containing the leader index. + RemoteCluster string `json:"remote_cluster"` + // ShardId The numerical shard ID, with values from 0 to one less than the number of + // replicas. + ShardId int `json:"shard_id"` + // SuccessfulReadRequests The number of successful fetches. + SuccessfulReadRequests int64 `json:"successful_read_requests"` + // SuccessfulWriteRequests The number of bulk write requests run on the follower. 
+ SuccessfulWriteRequests int64 `json:"successful_write_requests"` + TimeSinceLastRead Duration `json:"time_since_last_read,omitempty"` + // TimeSinceLastReadMillis The number of milliseconds since a read request was sent to the leader. + // When the follower is caught up to the leader, this number will increase up to + // the configured `read_poll_timeout` at which point another read request will + // be sent to the leader. + TimeSinceLastReadMillis int64 `json:"time_since_last_read_millis"` + TotalReadRemoteExecTime Duration `json:"total_read_remote_exec_time,omitempty"` + // TotalReadRemoteExecTimeMillis The total time reads spent running on the remote cluster. + TotalReadRemoteExecTimeMillis int64 `json:"total_read_remote_exec_time_millis"` + TotalReadTime Duration `json:"total_read_time,omitempty"` + // TotalReadTimeMillis The total time reads were outstanding, measured from the time a read was sent + // to the leader to the time a reply was returned to the follower. + TotalReadTimeMillis int64 `json:"total_read_time_millis"` + TotalWriteTime Duration `json:"total_write_time,omitempty"` + // TotalWriteTimeMillis The total time spent writing on the follower. + TotalWriteTimeMillis int64 `json:"total_write_time_millis"` + // WriteBufferOperationCount The number of write operations queued on the follower. + WriteBufferOperationCount int64 `json:"write_buffer_operation_count"` + // WriteBufferSizeInBytes The total number of bytes of operations currently queued for writing. + WriteBufferSizeInBytes ByteSize `json:"write_buffer_size_in_bytes"` } func (s *CcrShardStats) UnmarshalJSON(data []byte) error { @@ -413,3 +451,5 @@ func NewCcrShardStats() *CcrShardStats { return r } + +// false diff --git a/typedapi/types/certificateinformation.go b/typedapi/types/certificateinformation.go index 4babc0a85a..caa5487eca 100644 --- a/typedapi/types/certificateinformation.go +++ b/typedapi/types/certificateinformation.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,16 +31,28 @@ import ( // CertificateInformation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ssl/certificates/types.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ssl/certificates/types.ts#L22-L57 type CertificateInformation struct { - Alias *string `json:"alias,omitempty"` - Expiry DateTime `json:"expiry"` - Format string `json:"format"` - HasPrivateKey bool `json:"has_private_key"` - Issuer *string `json:"issuer,omitempty"` - Path string `json:"path"` - SerialNumber string `json:"serial_number"` - SubjectDn string `json:"subject_dn"` + // Alias If the path refers to a container file (a jks keystore, or a PKCS#12 file), + // it is the alias of the certificate. + // Otherwise, it is null. + Alias *string `json:"alias,omitempty"` + // Expiry The ISO formatted date of the certificate's expiry (not-after) date. + Expiry DateTime `json:"expiry"` + // Format The format of the file. + // Valid values include `jks`, `PKCS12`, and `PEM`. + Format string `json:"format"` + // HasPrivateKey Indicates whether Elasticsearch has access to the private key for this + // certificate. + HasPrivateKey bool `json:"has_private_key"` + // Issuer The Distinguished Name of the certificate's issuer. + Issuer *string `json:"issuer,omitempty"` + // Path The path to the certificate, as configured in the `elasticsearch.yml` file. + Path string `json:"path"` + // SerialNumber The hexadecimal representation of the certificate's serial number. 
+ SerialNumber string `json:"serial_number"` + // SubjectDn The Distinguished Name of the certificate's subject. + SubjectDn string `json:"subject_dn"` } func (s *CertificateInformation) UnmarshalJSON(data []byte) error { @@ -160,3 +172,5 @@ func NewCertificateInformation() *CertificateInformation { return r } + +// false diff --git a/typedapi/types/cgroup.go b/typedapi/types/cgroup.go index 8f54679f05..59aa938548 100644 --- a/typedapi/types/cgroup.go +++ b/typedapi/types/cgroup.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Cgroup type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L461-L474 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L497-L510 type Cgroup struct { // Cpu Contains statistics about `cpu` control group for the node. Cpu *CgroupCpu `json:"cpu,omitempty"` @@ -38,3 +38,5 @@ func NewCgroup() *Cgroup { return r } + +// false diff --git a/typedapi/types/cgroupcpu.go b/typedapi/types/cgroupcpu.go index bef77b70e1..713c9fdd02 100644 --- a/typedapi/types/cgroupcpu.go +++ b/typedapi/types/cgroupcpu.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CgroupCpu type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L487-L504 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L523-L540 type CgroupCpu struct { // CfsPeriodMicros The period of time, in microseconds, for how regularly all tasks in the same // cgroup as the Elasticsearch process should have their access to CPU resources @@ -122,3 +122,5 @@ func NewCgroupCpu() *CgroupCpu { return r } + +// false diff --git a/typedapi/types/cgroupcpustat.go b/typedapi/types/cgroupcpustat.go index 216a6cc3a2..4aac3e9595 100644 --- a/typedapi/types/cgroupcpustat.go +++ b/typedapi/types/cgroupcpustat.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CgroupCpuStat type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L506-L519 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L542-L555 type CgroupCpuStat struct { // NumberOfElapsedPeriods The number of reporting periods (as specified by `cfs_period_micros`) that // have elapsed. @@ -105,3 +105,5 @@ func NewCgroupCpuStat() *CgroupCpuStat { return r } + +// false diff --git a/typedapi/types/cgroupmemory.go b/typedapi/types/cgroupmemory.go index f57c5b58ee..600df87f56 100644 --- a/typedapi/types/cgroupmemory.go +++ b/typedapi/types/cgroupmemory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CgroupMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L521-L537 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L557-L573 type CgroupMemory struct { // ControlGroup The `memory` control group to which the Elasticsearch process belongs. ControlGroup *string `json:"control_group,omitempty"` @@ -111,3 +111,5 @@ func NewCgroupMemory() *CgroupMemory { return r } + +// false diff --git a/typedapi/types/chaininput.go b/typedapi/types/chaininput.go index 76719ad643..30ac1aaa79 100644 --- a/typedapi/types/chaininput.go +++ b/typedapi/types/chaininput.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ChainInput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L35-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L35-L37 type ChainInput struct { Inputs []map[string]WatcherInput `json:"inputs"` } @@ -33,3 +33,13 @@ func NewChainInput() *ChainInput { return r } + +// true + +type ChainInputVariant interface { + ChainInputCaster() *ChainInput +} + +func (s *ChainInput) ChainInputCaster() *ChainInput { + return s +} diff --git a/typedapi/types/charfilter.go b/typedapi/types/charfilter.go index ca2b2fba11..d0e0c41836 100644 --- a/typedapi/types/charfilter.go +++ b/typedapi/types/charfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // CharFilterDefinition // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/char_filters.ts#L28-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/char_filters.ts#L28-L33 type CharFilter any + +type CharFilterVariant interface { + CharFilterCaster() *CharFilter +} diff --git a/typedapi/types/charfilterdefinition.go b/typedapi/types/charfilterdefinition.go index 40c0f54ce9..0932c01a41 100644 --- a/typedapi/types/charfilterdefinition.go +++ b/typedapi/types/charfilterdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,5 +28,9 @@ package types // IcuNormalizationCharFilter // KuromojiIterationMarkCharFilter // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/char_filters.ts#L32-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/char_filters.ts#L35-L44 type CharFilterDefinition any + +type CharFilterDefinitionVariant interface { + CharFilterDefinitionCaster() *CharFilterDefinition +} diff --git a/typedapi/types/charfilterdetail.go b/typedapi/types/charfilterdetail.go index fbc89c1858..9687d0c8d7 100644 --- a/typedapi/types/charfilterdetail.go +++ b/typedapi/types/charfilterdetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CharFilterDetail type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/analyze/types.ts#L46-L49 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/analyze/types.ts#L46-L49 type CharFilterDetail struct { FilteredText []string `json:"filtered_text"` Name string `json:"name"` @@ -80,3 +80,5 @@ func NewCharFilterDetail() *CharFilterDetail { return r } + +// false diff --git a/typedapi/types/charfiltertypes.go b/typedapi/types/charfiltertypes.go index c446cda113..fb9f9b3176 100644 --- a/typedapi/types/charfiltertypes.go +++ b/typedapi/types/charfiltertypes.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // CharFilterTypes type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L228-L261 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L228-L261 type CharFilterTypes struct { // AnalyzerTypes Contains statistics about analyzer types used in selected nodes. AnalyzerTypes []FieldTypes `json:"analyzer_types"` @@ -48,3 +48,5 @@ func NewCharFilterTypes() *CharFilterTypes { return r } + +// false diff --git a/typedapi/types/chargrouptokenizer.go b/typedapi/types/chargrouptokenizer.go index 211d0db2f2..3f94c21eed 100644 --- a/typedapi/types/chargrouptokenizer.go +++ b/typedapi/types/chargrouptokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CharGroupTokenizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L56-L60 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L31-L38 type CharGroupTokenizer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` TokenizeOnChars []string `json:"tokenize_on_chars"` @@ -111,3 +111,13 @@ func NewCharGroupTokenizer() *CharGroupTokenizer { return r } + +// true + +type CharGroupTokenizerVariant interface { + CharGroupTokenizerCaster() *CharGroupTokenizer +} + +func (s *CharGroupTokenizer) CharGroupTokenizerCaster() *CharGroupTokenizer { + return s +} diff --git a/typedapi/types/checkpointing.go b/typedapi/types/checkpointing.go index c0b8cb830a..45d6c74198 100644 --- a/typedapi/types/checkpointing.go +++ b/typedapi/types/checkpointing.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Checkpointing type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/get_transform_stats/types.ts#L85-L92 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/get_transform_stats/types.ts#L85-L92 type Checkpointing struct { ChangesLastDetectedAt *int64 `json:"changes_last_detected_at,omitempty"` ChangesLastDetectedAtDateTime DateTime `json:"changes_last_detected_at_date_time,omitempty"` @@ -127,3 +127,5 @@ func NewCheckpointing() *Checkpointing { return r } + +// false diff --git a/typedapi/types/checkpointstats.go b/typedapi/types/checkpointstats.go index 27d28bccb1..b072d2103f 100644 --- a/typedapi/types/checkpointstats.go +++ b/typedapi/types/checkpointstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CheckpointStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/get_transform_stats/types.ts#L76-L83 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/get_transform_stats/types.ts#L76-L83 type CheckpointStats struct { Checkpoint int64 `json:"checkpoint"` CheckpointProgress *TransformProgress `json:"checkpoint_progress,omitempty"` @@ -107,3 +107,5 @@ func NewCheckpointStats() *CheckpointStats { return r } + +// false diff --git a/typedapi/types/childrenaggregate.go b/typedapi/types/childrenaggregate.go index 44bd0c3d2e..335819c406 100644 --- a/typedapi/types/childrenaggregate.go +++ b/typedapi/types/childrenaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // ChildrenAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L783-L784 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L888-L892 type ChildrenAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -491,6 +491,13 @@ func (s *ChildrenAggregate) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -624,8 +631,10 @@ func (s ChildrenAggregate) MarshalJSON() ([]byte, error) { // NewChildrenAggregate returns a ChildrenAggregate. func NewChildrenAggregate() *ChildrenAggregate { r := &ChildrenAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/childrenaggregation.go b/typedapi/types/childrenaggregation.go index b2f1b14d72..1bbf599144 100644 --- a/typedapi/types/childrenaggregation.go +++ b/typedapi/types/childrenaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ChildrenAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L113-L118 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L121-L126 type ChildrenAggregation struct { // Type The child type that should be selected. Type *string `json:"type,omitempty"` @@ -67,3 +67,13 @@ func NewChildrenAggregation() *ChildrenAggregation { return r } + +// true + +type ChildrenAggregationVariant interface { + ChildrenAggregationCaster() *ChildrenAggregation +} + +func (s *ChildrenAggregation) ChildrenAggregationCaster() *ChildrenAggregation { + return s +} diff --git a/typedapi/types/chineseanalyzer.go b/typedapi/types/chineseanalyzer.go new file mode 100644 index 0000000000..288ba426a8 --- /dev/null +++ b/typedapi/types/chineseanalyzer.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ChineseAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L100-L104 +type ChineseAnalyzer struct { + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ChineseAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ChineseAnalyzer) MarshalJSON() ([]byte, error) { + type innerChineseAnalyzer ChineseAnalyzer + tmp := innerChineseAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "chinese" + + 
return json.Marshal(tmp) +} + +// NewChineseAnalyzer returns a ChineseAnalyzer. +func NewChineseAnalyzer() *ChineseAnalyzer { + r := &ChineseAnalyzer{} + + return r +} + +// true + +type ChineseAnalyzerVariant interface { + ChineseAnalyzerCaster() *ChineseAnalyzer +} + +func (s *ChineseAnalyzer) ChineseAnalyzerCaster() *ChineseAnalyzer { + return s +} diff --git a/typedapi/types/chisquareheuristic.go b/typedapi/types/chisquareheuristic.go index 3842cf16d9..23a548a56d 100644 --- a/typedapi/types/chisquareheuristic.go +++ b/typedapi/types/chisquareheuristic.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ChiSquareHeuristic type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L737-L746 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L782-L791 type ChiSquareHeuristic struct { // BackgroundIsSuperset Set to `false` if you defined a custom background filter that represents a // different set of documents that you want to compare to. @@ -95,3 +95,13 @@ func NewChiSquareHeuristic() *ChiSquareHeuristic { return r } + +// true + +type ChiSquareHeuristicVariant interface { + ChiSquareHeuristicCaster() *ChiSquareHeuristic +} + +func (s *ChiSquareHeuristic) ChiSquareHeuristicCaster() *ChiSquareHeuristic { + return s +} diff --git a/typedapi/types/chunkingconfig.go b/typedapi/types/chunkingconfig.go index e3186c738d..6fb1c5c82b 100644 --- a/typedapi/types/chunkingconfig.go +++ b/typedapi/types/chunkingconfig.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // ChunkingConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Datafeed.ts#L238-L251 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Datafeed.ts#L251-L264 type ChunkingConfig struct { // Mode If the mode is `auto`, the chunk size is dynamically calculated; // this is the recommended value when the datafeed does not use aggregations. @@ -82,3 +82,13 @@ func NewChunkingConfig() *ChunkingConfig { return r } + +// true + +type ChunkingConfigVariant interface { + ChunkingConfigCaster() *ChunkingConfig +} + +func (s *ChunkingConfig) ChunkingConfigCaster() *ChunkingConfig { + return s +} diff --git a/typedapi/types/circleprocessor.go b/typedapi/types/circleprocessor.go index 06a022fb9d..869df7a0a6 100644 --- a/typedapi/types/circleprocessor.go +++ b/typedapi/types/circleprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // CircleProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L410-L433 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L573-L596 type CircleProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -191,3 +191,13 @@ func NewCircleProcessor() *CircleProcessor { return r } + +// true + +type CircleProcessorVariant interface { + CircleProcessorCaster() *CircleProcessor +} + +func (s *CircleProcessor) CircleProcessorCaster() *CircleProcessor { + return s +} diff --git a/typedapi/types/cjkanalyzer.go b/typedapi/types/cjkanalyzer.go new file mode 100644 index 0000000000..9a023d756a --- /dev/null +++ b/typedapi/types/cjkanalyzer.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CjkAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L106-L110 +type CjkAnalyzer struct { + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *CjkAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CjkAnalyzer) MarshalJSON() ([]byte, error) { + type innerCjkAnalyzer CjkAnalyzer + tmp := innerCjkAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "cjk" + + return json.Marshal(tmp) +} + +// 
NewCjkAnalyzer returns a CjkAnalyzer. +func NewCjkAnalyzer() *CjkAnalyzer { + r := &CjkAnalyzer{} + + return r +} + +// true + +type CjkAnalyzerVariant interface { + CjkAnalyzerCaster() *CjkAnalyzer +} + +func (s *CjkAnalyzer) CjkAnalyzerCaster() *CjkAnalyzer { + return s +} diff --git a/typedapi/types/classictokenizer.go b/typedapi/types/classictokenizer.go new file mode 100644 index 0000000000..7223f72699 --- /dev/null +++ b/typedapi/types/classictokenizer.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClassicTokenizer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L40-L46 +type ClassicTokenizer struct { + MaxTokenLength *int `json:"max_token_length,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *ClassicTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTokenLength", err) + } + s.MaxTokenLength = &value + case float64: + f := int(v) + s.MaxTokenLength = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ClassicTokenizer) MarshalJSON() ([]byte, error) { + type innerClassicTokenizer ClassicTokenizer + tmp := innerClassicTokenizer{ + MaxTokenLength: s.MaxTokenLength, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "classic" + + return json.Marshal(tmp) +} + +// NewClassicTokenizer returns a ClassicTokenizer. 
+func NewClassicTokenizer() *ClassicTokenizer { + r := &ClassicTokenizer{} + + return r +} + +// true + +type ClassicTokenizerVariant interface { + ClassicTokenizerCaster() *ClassicTokenizer +} + +func (s *ClassicTokenizer) ClassicTokenizerCaster() *ClassicTokenizer { + return s +} diff --git a/typedapi/types/classificationinferenceoptions.go b/typedapi/types/classificationinferenceoptions.go index ad2dc0d04e..0db7382a68 100644 --- a/typedapi/types/classificationinferenceoptions.go +++ b/typedapi/types/classificationinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClassificationInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L93-L108 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L93-L108 type ClassificationInferenceOptions struct { // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0. 
NumTopClasses *int `json:"num_top_classes,omitempty"` @@ -143,3 +143,13 @@ func NewClassificationInferenceOptions() *ClassificationInferenceOptions { return r } + +// true + +type ClassificationInferenceOptionsVariant interface { + ClassificationInferenceOptionsCaster() *ClassificationInferenceOptions +} + +func (s *ClassificationInferenceOptions) ClassificationInferenceOptionsCaster() *ClassificationInferenceOptions { + return s +} diff --git a/typedapi/types/cleanuprepositoryresults.go b/typedapi/types/cleanuprepositoryresults.go index 2f56a559a3..6cfe759dfe 100644 --- a/typedapi/types/cleanuprepositoryresults.go +++ b/typedapi/types/cleanuprepositoryresults.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CleanupRepositoryResults type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L29-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L29-L34 type CleanupRepositoryResults struct { // DeletedBlobs Number of binary large objects (blobs) removed during cleanup. DeletedBlobs int64 `json:"deleted_blobs"` @@ -95,3 +95,5 @@ func NewCleanupRepositoryResults() *CleanupRepositoryResults { return r } + +// false diff --git a/typedapi/types/client.go b/typedapi/types/client.go index 8c4459473a..13a02ff078 100644 --- a/typedapi/types/client.go +++ b/typedapi/types/client.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Client type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L649-L696 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L720-L767 type Client struct { // Agent Reported agent for the HTTP client. // If unavailable, this property is not included in the response. @@ -235,3 +235,5 @@ func NewClient() *Client { return r } + +// false diff --git a/typedapi/types/closeindexresult.go b/typedapi/types/closeindexresult.go index d0ec9c6ba3..81d2475457 100644 --- a/typedapi/types/closeindexresult.go +++ b/typedapi/types/closeindexresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CloseIndexResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/close/CloseIndexResponse.ts#L32-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/close/CloseIndexResponse.ts#L32-L35 type CloseIndexResult struct { Closed bool `json:"closed"` Shards map[string]CloseShardResult `json:"shards,omitempty"` @@ -82,8 +82,10 @@ func (s *CloseIndexResult) UnmarshalJSON(data []byte) error { // NewCloseIndexResult returns a CloseIndexResult. 
func NewCloseIndexResult() *CloseIndexResult { r := &CloseIndexResult{ - Shards: make(map[string]CloseShardResult, 0), + Shards: make(map[string]CloseShardResult), } return r } + +// false diff --git a/typedapi/types/closeshardresult.go b/typedapi/types/closeshardresult.go index 1d1506c690..8299f8cb13 100644 --- a/typedapi/types/closeshardresult.go +++ b/typedapi/types/closeshardresult.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // CloseShardResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/close/CloseIndexResponse.ts#L37-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/close/CloseIndexResponse.ts#L37-L39 type CloseShardResult struct { Failures []ShardFailure `json:"failures"` } @@ -33,3 +33,5 @@ func NewCloseShardResult() *CloseShardResult { return r } + +// false diff --git a/typedapi/types/clusterappliedstats.go b/typedapi/types/clusterappliedstats.go index 67b22d663b..b8b6249402 100644 --- a/typedapi/types/clusterappliedstats.go +++ b/typedapi/types/clusterappliedstats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ClusterAppliedStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L221-L223 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L221-L223 type ClusterAppliedStats struct { Recordings []Recording `json:"recordings,omitempty"` } @@ -33,3 +33,5 @@ func NewClusterAppliedStats() *ClusterAppliedStats { return r } + +// false diff --git a/typedapi/types/clustercomponenttemplate.go b/typedapi/types/clustercomponenttemplate.go index 7d2b66e928..e8157a53e8 100644 --- a/typedapi/types/clustercomponenttemplate.go +++ b/typedapi/types/clustercomponenttemplate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ClusterComponentTemplate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/_types/ComponentTemplate.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/_types/ComponentTemplate.ts#L27-L30 type ClusterComponentTemplate struct { ComponentTemplate ComponentTemplateNode `json:"component_template"` Name string `json:"name"` @@ -72,3 +72,5 @@ func NewClusterComponentTemplate() *ClusterComponentTemplate { return r } + +// false diff --git a/typedapi/types/clusterdetails.go b/typedapi/types/clusterdetails.go index eac4750f15..a6b9774a24 100644 --- a/typedapi/types/clusterdetails.go +++ b/typedapi/types/clusterdetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // ClusterDetails type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L45-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L45-L52 type ClusterDetails struct { Failures []ShardFailure `json:"failures,omitempty"` Indices string `json:"indices"` @@ -115,3 +115,5 @@ func NewClusterDetails() *ClusterDetails { return r } + +// false diff --git a/typedapi/types/clusterfilesystem.go b/typedapi/types/clusterfilesystem.go index 137c64babc..d909ef0753 100644 --- a/typedapi/types/clusterfilesystem.go +++ b/typedapi/types/clusterfilesystem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterFileSystem type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L34-L49 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L34-L49 type ClusterFileSystem struct { // AvailableInBytes Total number of bytes available to JVM in file stores across all selected // nodes. 
@@ -117,3 +117,5 @@ func NewClusterFileSystem() *ClusterFileSystem { return r } + +// false diff --git a/typedapi/types/clusterindexingpressure.go b/typedapi/types/clusterindexingpressure.go index 74ce82cab5..589f4971b8 100644 --- a/typedapi/types/clusterindexingpressure.go +++ b/typedapi/types/clusterindexingpressure.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ClusterIndexingPressure type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L570-L572 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L570-L572 type ClusterIndexingPressure struct { Memory ClusterPressureMemory `json:"memory"` } @@ -33,3 +33,5 @@ func NewClusterIndexingPressure() *ClusterIndexingPressure { return r } + +// false diff --git a/typedapi/types/clusterindices.go b/typedapi/types/clusterindices.go index b6919380dc..f122ee91a4 100644 --- a/typedapi/types/clusterindices.go +++ b/typedapi/types/clusterindices.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterIndices type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L74-L107 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L74-L107 type ClusterIndices struct { // Analysis Contains statistics about analyzers and analyzer components used in selected // nodes. @@ -150,3 +150,5 @@ func NewClusterIndices() *ClusterIndices { return r } + +// false diff --git a/typedapi/types/clusterindicesshards.go b/typedapi/types/clusterindicesshards.go index 838d40cfe1..f342b3ade1 100644 --- a/typedapi/types/clusterindicesshards.go +++ b/typedapi/types/clusterindicesshards.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterIndicesShards type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L60-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L60-L72 type ClusterIndicesShards struct { // Index Contains statistics about shards assigned to selected nodes. Index *ClusterIndicesShardsIndex `json:"index,omitempty"` @@ -122,3 +122,5 @@ func NewClusterIndicesShards() *ClusterIndicesShards { return r } + +// false diff --git a/typedapi/types/clusterindicesshardsindex.go b/typedapi/types/clusterindicesshardsindex.go index 5d527608d9..c2f414df81 100644 --- a/typedapi/types/clusterindicesshardsindex.go +++ b/typedapi/types/clusterindicesshardsindex.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ClusterIndicesShardsIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L51-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L51-L58 type ClusterIndicesShardsIndex struct { // Primaries Contains statistics about the number of primary shards assigned to selected // nodes. @@ -40,3 +40,5 @@ func NewClusterIndicesShardsIndex() *ClusterIndicesShardsIndex { return r } + +// false diff --git a/typedapi/types/clusterinfo.go b/typedapi/types/clusterinfo.go index 0c858fc2b7..16d4c5c29e 100644 --- a/typedapi/types/clusterinfo.go +++ b/typedapi/types/clusterinfo.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ClusterInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/types.ts#L48-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/types.ts#L49-L55 type ClusterInfo struct { Nodes map[string]NodeDiskUsage `json:"nodes"` ReservedSizes []ReservedSize `json:"reserved_sizes"` @@ -34,11 +34,13 @@ type ClusterInfo struct { // NewClusterInfo returns a ClusterInfo. 
func NewClusterInfo() *ClusterInfo { r := &ClusterInfo{ - Nodes: make(map[string]NodeDiskUsage, 0), - ShardDataSetSizes: make(map[string]string, 0), - ShardPaths: make(map[string]string, 0), - ShardSizes: make(map[string]int64, 0), + Nodes: make(map[string]NodeDiskUsage), + ShardDataSetSizes: make(map[string]string), + ShardPaths: make(map[string]string), + ShardSizes: make(map[string]int64), } return r } + +// false diff --git a/typedapi/types/clusterinfotargets.go b/typedapi/types/clusterinfotargets.go index 00f4a974b8..ca7ac21872 100644 --- a/typedapi/types/clusterinfotargets.go +++ b/typedapi/types/clusterinfotargets.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,5 @@ import ( // ClusterInfoTargets type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L389-L389 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L393-L393 type ClusterInfoTargets []clusterinfotarget.ClusterInfoTarget diff --git a/typedapi/types/clusteringest.go b/typedapi/types/clusteringest.go index 394809cd15..9797a0ebd3 100644 --- a/typedapi/types/clusteringest.go +++ b/typedapi/types/clusteringest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterIngest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L270-L273 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L270-L273 type ClusterIngest struct { NumberOfPipelines int `json:"number_of_pipelines"` ProcessorStats map[string]ClusterProcessor `json:"processor_stats"` @@ -84,8 +84,10 @@ func (s *ClusterIngest) UnmarshalJSON(data []byte) error { // NewClusterIngest returns a ClusterIngest. func NewClusterIngest() *ClusterIngest { r := &ClusterIngest{ - ProcessorStats: make(map[string]ClusterProcessor, 0), + ProcessorStats: make(map[string]ClusterProcessor), } return r } + +// false diff --git a/typedapi/types/clusterjvm.go b/typedapi/types/clusterjvm.go index 2bf3fbfad7..1b7b15dce3 100644 --- a/typedapi/types/clusterjvm.go +++ b/typedapi/types/clusterjvm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterJvm type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L275-L292 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L275-L292 type ClusterJvm struct { // MaxUptimeInMillis Uptime duration, in milliseconds, since JVM last started. 
MaxUptimeInMillis int64 `json:"max_uptime_in_millis"` @@ -99,3 +99,5 @@ func NewClusterJvm() *ClusterJvm { return r } + +// false diff --git a/typedapi/types/clusterjvmmemory.go b/typedapi/types/clusterjvmmemory.go index 2afdfcd58c..4fa977bb97 100644 --- a/typedapi/types/clusterjvmmemory.go +++ b/typedapi/types/clusterjvmmemory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterJvmMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L294-L303 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L294-L303 type ClusterJvmMemory struct { // HeapMaxInBytes Maximum amount of memory, in bytes, available for use by the heap across all // selected nodes. @@ -96,3 +96,5 @@ func NewClusterJvmMemory() *ClusterJvmMemory { return r } + +// false diff --git a/typedapi/types/clusterjvmversion.go b/typedapi/types/clusterjvmversion.go index af528031cf..ba839753d3 100644 --- a/typedapi/types/clusterjvmversion.go +++ b/typedapi/types/clusterjvmversion.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterJvmVersion type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L305-L335 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L305-L335 type ClusterJvmVersion struct { // BundledJdk Always `true`. All distributions come with a bundled Java Development Kit // (JDK). @@ -156,3 +156,5 @@ func NewClusterJvmVersion() *ClusterJvmVersion { return r } + +// false diff --git a/typedapi/types/clusternetworktypes.go b/typedapi/types/clusternetworktypes.go index c406feb99b..89e1fed7d2 100644 --- a/typedapi/types/clusternetworktypes.go +++ b/typedapi/types/clusternetworktypes.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ClusterNetworkTypes type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L337-L346 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L337-L346 type ClusterNetworkTypes struct { // HttpTypes Contains statistics about the HTTP network types used by selected nodes. HttpTypes map[string]int `json:"http_types"` @@ -33,9 +33,11 @@ type ClusterNetworkTypes struct { // NewClusterNetworkTypes returns a ClusterNetworkTypes. 
func NewClusterNetworkTypes() *ClusterNetworkTypes { r := &ClusterNetworkTypes{ - HttpTypes: make(map[string]int, 0), - TransportTypes: make(map[string]int, 0), + HttpTypes: make(map[string]int), + TransportTypes: make(map[string]int), } return r } + +// false diff --git a/typedapi/types/clusternode.go b/typedapi/types/clusternode.go index 514e7998e1..e838d0f76f 100644 --- a/typedapi/types/clusternode.go +++ b/typedapi/types/clusternode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ClusterNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/ClusterNode.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/ClusterNode.ts#L22-L24 type ClusterNode struct { Name string `json:"name"` } @@ -66,3 +66,5 @@ func NewClusterNode() *ClusterNode { return r } + +// false diff --git a/typedapi/types/clusternodecount.go b/typedapi/types/clusternodecount.go index a9da7045cc..18ed54a8a5 100644 --- a/typedapi/types/clusternodecount.go +++ b/typedapi/types/clusternodecount.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterNodeCount type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L348-L367 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L348-L367 type ClusterNodeCount struct { CoordinatingOnly int `json:"coordinating_only"` Data int `json:"data"` @@ -299,3 +299,5 @@ func NewClusterNodeCount() *ClusterNodeCount { return r } + +// false diff --git a/typedapi/types/clusternodes.go b/typedapi/types/clusternodes.go index 6edb3c1d40..3a2e5bc09f 100644 --- a/typedapi/types/clusternodes.go +++ b/typedapi/types/clusternodes.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ClusterNodes type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L369-L402 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L369-L402 type ClusterNodes struct { // Count Contains counts for nodes selected by the request’s node filters. Count ClusterNodeCount `json:"count"` @@ -55,8 +55,10 @@ type ClusterNodes struct { // NewClusterNodes returns a ClusterNodes. 
func NewClusterNodes() *ClusterNodes { r := &ClusterNodes{ - DiscoveryTypes: make(map[string]int, 0), + DiscoveryTypes: make(map[string]int), } return r } + +// false diff --git a/typedapi/types/clusteroperatingsystem.go b/typedapi/types/clusteroperatingsystem.go index cf3357ff77..9d440f82e9 100644 --- a/typedapi/types/clusteroperatingsystem.go +++ b/typedapi/types/clusteroperatingsystem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterOperatingSystem type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L415-L442 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L415-L442 type ClusterOperatingSystem struct { // AllocatedProcessors Number of processors used to calculate thread pool size across all selected // nodes. @@ -130,3 +130,5 @@ func NewClusterOperatingSystem() *ClusterOperatingSystem { return r } + +// false diff --git a/typedapi/types/clusteroperatingsystemarchitecture.go b/typedapi/types/clusteroperatingsystemarchitecture.go index cc07cb904a..49218ec765 100644 --- a/typedapi/types/clusteroperatingsystemarchitecture.go +++ b/typedapi/types/clusteroperatingsystemarchitecture.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterOperatingSystemArchitecture type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L404-L413 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L404-L413 type ClusterOperatingSystemArchitecture struct { // Arch Name of an architecture used by one or more selected nodes. Arch string `json:"arch"` @@ -93,3 +93,5 @@ func NewClusterOperatingSystemArchitecture() *ClusterOperatingSystemArchitecture return r } + +// false diff --git a/typedapi/types/clusteroperatingsystemname.go b/typedapi/types/clusteroperatingsystemname.go index 38783abd0b..9cf4b08560 100644 --- a/typedapi/types/clusteroperatingsystemname.go +++ b/typedapi/types/clusteroperatingsystemname.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterOperatingSystemName type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L444-L453 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L444-L453 type ClusterOperatingSystemName struct { // Count Number of selected nodes using the operating system. 
Count int `json:"count"` @@ -86,3 +86,5 @@ func NewClusterOperatingSystemName() *ClusterOperatingSystemName { return r } + +// false diff --git a/typedapi/types/clusteroperatingsystemprettyname.go b/typedapi/types/clusteroperatingsystemprettyname.go index 7e33073185..94db7119ab 100644 --- a/typedapi/types/clusteroperatingsystemprettyname.go +++ b/typedapi/types/clusteroperatingsystemprettyname.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterOperatingSystemPrettyName type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L455-L464 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L455-L464 type ClusterOperatingSystemPrettyName struct { // Count Number of selected nodes using the operating system. Count int `json:"count"` @@ -87,3 +87,5 @@ func NewClusterOperatingSystemPrettyName() *ClusterOperatingSystemPrettyName { return r } + +// false diff --git a/typedapi/types/clusterpressurememory.go b/typedapi/types/clusterpressurememory.go index 451bae362f..f1a9180b71 100644 --- a/typedapi/types/clusterpressurememory.go +++ b/typedapi/types/clusterpressurememory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterPressureMemory type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L574-L578 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L574-L578 type ClusterPressureMemory struct { Current IndexingPressureMemorySummary `json:"current"` LimitInBytes int64 `json:"limit_in_bytes"` @@ -89,3 +89,5 @@ func NewClusterPressureMemory() *ClusterPressureMemory { return r } + +// false diff --git a/typedapi/types/clusterprocess.go b/typedapi/types/clusterprocess.go index faa321eea8..1f4e9c2187 100644 --- a/typedapi/types/clusterprocess.go +++ b/typedapi/types/clusterprocess.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ClusterProcess type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L466-L475 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L466-L475 type ClusterProcess struct { // Cpu Contains statistics about CPU used by selected nodes. Cpu ClusterProcessCpu `json:"cpu"` @@ -36,3 +36,5 @@ func NewClusterProcess() *ClusterProcess { return r } + +// false diff --git a/typedapi/types/clusterprocesscpu.go b/typedapi/types/clusterprocesscpu.go index 8bc7ec9bbc..697d3af992 100644 --- a/typedapi/types/clusterprocesscpu.go +++ b/typedapi/types/clusterprocesscpu.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterProcessCpu type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L477-L483 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L477-L483 type ClusterProcessCpu struct { // Percent Percentage of CPU used across all selected nodes. // Returns `-1` if not supported. @@ -80,3 +80,5 @@ func NewClusterProcessCpu() *ClusterProcessCpu { return r } + +// false diff --git a/typedapi/types/clusterprocessopenfiledescriptors.go b/typedapi/types/clusterprocessopenfiledescriptors.go index 91f137a22b..d69e1826a8 100644 --- a/typedapi/types/clusterprocessopenfiledescriptors.go +++ b/typedapi/types/clusterprocessopenfiledescriptors.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterProcessOpenFileDescriptors type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L485-L501 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L485-L501 type ClusterProcessOpenFileDescriptors struct { // Avg Average number of concurrently open file descriptors. // Returns `-1` if not supported. 
@@ -117,3 +117,5 @@ func NewClusterProcessOpenFileDescriptors() *ClusterProcessOpenFileDescriptors { return r } + +// false diff --git a/typedapi/types/clusterprocessor.go b/typedapi/types/clusterprocessor.go index 2496edcd0a..7d95a7fd70 100644 --- a/typedapi/types/clusterprocessor.go +++ b/typedapi/types/clusterprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L503-L509 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L503-L509 type ClusterProcessor struct { Count int64 `json:"count"` Current int64 `json:"current"` @@ -121,3 +121,5 @@ func NewClusterProcessor() *ClusterProcessor { return r } + +// false diff --git a/typedapi/types/clusterremoteinfo.go b/typedapi/types/clusterremoteinfo.go index c5e67ab8c7..71dc507f98 100644 --- a/typedapi/types/clusterremoteinfo.go +++ b/typedapi/types/clusterremoteinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // ClusterRemoteSniffInfo // ClusterRemoteProxyInfo // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L29-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L29-L30 type ClusterRemoteInfo any diff --git a/typedapi/types/clusterremoteproxyinfo.go b/typedapi/types/clusterremoteproxyinfo.go index d38c994060..247eb4fee7 100644 --- a/typedapi/types/clusterremoteproxyinfo.go +++ b/typedapi/types/clusterremoteproxyinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,16 +31,35 @@ import ( // ClusterRemoteProxyInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L42-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L58-L83 type ClusterRemoteProxyInfo struct { - Connected bool `json:"connected"` - InitialConnectTimeout Duration `json:"initial_connect_timeout"` - MaxProxySocketConnections int `json:"max_proxy_socket_connections"` - Mode string `json:"mode,omitempty"` - NumProxySocketsConnected int `json:"num_proxy_sockets_connected"` - ProxyAddress string `json:"proxy_address"` - ServerName string `json:"server_name"` - SkipUnavailable bool `json:"skip_unavailable"` + // ClusterCredentials This field is present and has a value of `::es_redacted::` only when the + // remote cluster is configured with the API key based model. Otherwise, the + // field is not present. + ClusterCredentials *string `json:"cluster_credentials,omitempty"` + // Connected If it is `true`, there is at least one open connection to the remote cluster. + // If it is `false`, it means that the cluster no longer has an open connection + // to the remote cluster. + // It does not necessarily mean that the remote cluster is down or unavailable, + // just that at some point a connection was lost. + Connected bool `json:"connected"` + // InitialConnectTimeout The initial connect timeout for remote cluster connections. + InitialConnectTimeout Duration `json:"initial_connect_timeout"` + // MaxProxySocketConnections The maximum number of socket connections to the remote cluster when proxy + // mode is configured. + MaxProxySocketConnections int `json:"max_proxy_socket_connections"` + // Mode The connection mode for the remote cluster. 
+ Mode string `json:"mode,omitempty"` + // NumProxySocketsConnected The number of open socket connections to the remote cluster when proxy mode + // is configured. + NumProxySocketsConnected int `json:"num_proxy_sockets_connected"` + // ProxyAddress The address for remote connections when proxy mode is configured. + ProxyAddress string `json:"proxy_address"` + ServerName string `json:"server_name"` + // SkipUnavailable If `true`, cross-cluster search skips the remote cluster when its nodes are + // unavailable during the search and ignores errors returned by the remote + // cluster. + SkipUnavailable bool `json:"skip_unavailable"` } func (s *ClusterRemoteProxyInfo) UnmarshalJSON(data []byte) error { @@ -58,6 +77,18 @@ func (s *ClusterRemoteProxyInfo) UnmarshalJSON(data []byte) error { switch t { + case "cluster_credentials": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ClusterCredentials", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClusterCredentials = &o + case "connected": var tmp any dec.Decode(&tmp) @@ -161,6 +192,7 @@ func (s *ClusterRemoteProxyInfo) UnmarshalJSON(data []byte) error { func (s ClusterRemoteProxyInfo) MarshalJSON() ([]byte, error) { type innerClusterRemoteProxyInfo ClusterRemoteProxyInfo tmp := innerClusterRemoteProxyInfo{ + ClusterCredentials: s.ClusterCredentials, Connected: s.Connected, InitialConnectTimeout: s.InitialConnectTimeout, MaxProxySocketConnections: s.MaxProxySocketConnections, @@ -182,3 +214,5 @@ func NewClusterRemoteProxyInfo() *ClusterRemoteProxyInfo { return r } + +// false diff --git a/typedapi/types/clusterremotesniffinfo.go b/typedapi/types/clusterremotesniffinfo.go index f123e27f61..882b242c24 100644 --- a/typedapi/types/clusterremotesniffinfo.go +++ b/typedapi/types/clusterremotesniffinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,15 +31,31 @@ import ( // ClusterRemoteSniffInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L32-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L32-L56 type ClusterRemoteSniffInfo struct { - Connected bool `json:"connected"` - InitialConnectTimeout Duration `json:"initial_connect_timeout"` - MaxConnectionsPerCluster int `json:"max_connections_per_cluster"` - Mode string `json:"mode,omitempty"` - NumNodesConnected int64 `json:"num_nodes_connected"` - Seeds []string `json:"seeds"` - SkipUnavailable bool `json:"skip_unavailable"` + // Connected If it is `true`, there is at least one open connection to the remote cluster. + // If it is `false`, it means that the cluster no longer has an open connection + // to the remote cluster. + // It does not necessarily mean that the remote cluster is down or unavailable, + // just that at some point a connection was lost. + Connected bool `json:"connected"` + // InitialConnectTimeout The initial connect timeout for remote cluster connections. + InitialConnectTimeout Duration `json:"initial_connect_timeout"` + // MaxConnectionsPerCluster The maximum number of connections maintained for the remote cluster when + // sniff mode is configured. + MaxConnectionsPerCluster int `json:"max_connections_per_cluster"` + // Mode The connection mode for the remote cluster. + Mode string `json:"mode,omitempty"` + // NumNodesConnected The number of connected nodes in the remote cluster when sniff mode is + // configured. 
+ NumNodesConnected int64 `json:"num_nodes_connected"` + // Seeds The initial seed transport addresses of the remote cluster when sniff mode is + // configured. + Seeds []string `json:"seeds"` + // SkipUnavailable If `true`, cross-cluster search skips the remote cluster when its nodes are + // unavailable during the search and ignores errors returned by the remote + // cluster. + SkipUnavailable bool `json:"skip_unavailable"` } func (s *ClusterRemoteSniffInfo) UnmarshalJSON(data []byte) error { @@ -160,3 +176,5 @@ func NewClusterRemoteSniffInfo() *ClusterRemoteSniffInfo { return r } + +// false diff --git a/typedapi/types/clusterruntimefieldtypes.go b/typedapi/types/clusterruntimefieldtypes.go index 17f378f744..35302ae10f 100644 --- a/typedapi/types/clusterruntimefieldtypes.go +++ b/typedapi/types/clusterruntimefieldtypes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterRuntimeFieldTypes type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L169-L226 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L169-L226 type ClusterRuntimeFieldTypes struct { // CharsMax Maximum number of characters for a single runtime field script. 
CharsMax int `json:"chars_max"` @@ -296,3 +296,5 @@ func NewClusterRuntimeFieldTypes() *ClusterRuntimeFieldTypes { return r } + +// false diff --git a/typedapi/types/clustershardmetrics.go b/typedapi/types/clustershardmetrics.go index a30448367f..a3064f7933 100644 --- a/typedapi/types/clustershardmetrics.go +++ b/typedapi/types/clustershardmetrics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterShardMetrics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L511-L524 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L511-L524 type ClusterShardMetrics struct { // Avg Mean number of shards in an index, counting only shards assigned to selected // nodes. @@ -118,3 +118,5 @@ func NewClusterShardMetrics() *ClusterShardMetrics { return r } + +// false diff --git a/typedapi/types/clusterstatequeue.go b/typedapi/types/clusterstatequeue.go index 4b3eefeb00..73ca10327a 100644 --- a/typedapi/types/clusterstatequeue.go +++ b/typedapi/types/clusterstatequeue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterStateQueue type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L248-L261 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L248-L261 type ClusterStateQueue struct { // Committed Number of committed cluster states in queue. Committed *int64 `json:"committed,omitempty"` @@ -112,3 +112,5 @@ func NewClusterStateQueue() *ClusterStateQueue { return r } + +// false diff --git a/typedapi/types/clusterstateupdate.go b/typedapi/types/clusterstateupdate.go index 3fe6aecaba..b679af1cbf 100644 --- a/typedapi/types/clusterstateupdate.go +++ b/typedapi/types/clusterstateupdate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterStateUpdate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L278-L343 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L278-L343 type ClusterStateUpdate struct { // CommitTime The cumulative amount of time spent waiting for a successful cluster state // update to commit, which measures the time from the start of each publication @@ -216,3 +216,5 @@ func NewClusterStateUpdate() *ClusterStateUpdate { return r } + +// false diff --git a/typedapi/types/clusterstatistics.go b/typedapi/types/clusterstatistics.go index 5e3b92c6fd..76bbc3aab6 100644 --- a/typedapi/types/clusterstatistics.go +++ b/typedapi/types/clusterstatistics.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ClusterStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L27-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L27-L35 type ClusterStatistics struct { Details map[string]ClusterDetails `json:"details,omitempty"` Failed int `json:"failed"` @@ -169,8 +169,10 @@ func (s *ClusterStatistics) UnmarshalJSON(data []byte) error { // NewClusterStatistics returns a ClusterStatistics. func NewClusterStatistics() *ClusterStatistics { r := &ClusterStatistics{ - Details: make(map[string]ClusterDetails, 0), + Details: make(map[string]ClusterDetails), } return r } + +// false diff --git a/typedapi/types/collector.go b/typedapi/types/collector.go index 3778d0506b..94913c9ef9 100644 --- a/typedapi/types/collector.go +++ b/typedapi/types/collector.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Collector type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/profile.ts#L86-L91 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L94-L99 type Collector struct { Children []Collector `json:"children,omitempty"` Name string `json:"name"` @@ -99,3 +99,5 @@ func NewCollector() *Collector { return r } + +// false diff --git a/typedapi/types/column.go b/typedapi/types/column.go index 75db9312ed..1916887b9d 100644 --- a/typedapi/types/column.go +++ b/typedapi/types/column.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Column type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/sql/types.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/sql/types.ts#L23-L26 type Column struct { Name string `json:"name"` Type string `json:"type"` @@ -80,3 +80,5 @@ func NewColumn() *Column { return r } + +// false diff --git a/typedapi/types/combinedfieldsquery.go b/typedapi/types/combinedfieldsquery.go index d60f804a30..4b59d6a218 100644 --- a/typedapi/types/combinedfieldsquery.go +++ b/typedapi/types/combinedfieldsquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // CombinedFieldsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/abstractions.ts#L465-L499 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/abstractions.ts#L472-L506 type CombinedFieldsQuery struct { // AutoGenerateSynonymsPhraseQuery If true, match phrase queries are automatically created for multi-term // synonyms. @@ -162,3 +162,13 @@ func NewCombinedFieldsQuery() *CombinedFieldsQuery { return r } + +// true + +type CombinedFieldsQueryVariant interface { + CombinedFieldsQueryCaster() *CombinedFieldsQuery +} + +func (s *CombinedFieldsQuery) CombinedFieldsQueryCaster() *CombinedFieldsQuery { + return s +} diff --git a/typedapi/types/command.go b/typedapi/types/command.go index 800db6e685..7aca6ae130 100644 --- a/typedapi/types/command.go +++ b/typedapi/types/command.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Command type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/reroute/types.ts#L22-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/reroute/types.ts#L22-L43 type Command struct { // AllocateEmptyPrimary Allocate an empty primary shard to a node. 
Accepts the index and shard for // index name and shard number, and node to allocate the shard to. Using this @@ -66,3 +66,13 @@ func NewCommand() *Command { return r } + +// true + +type CommandVariant interface { + CommandCaster() *Command +} + +func (s *Command) CommandCaster() *Command { + return s +} diff --git a/typedapi/types/commandallocateprimaryaction.go b/typedapi/types/commandallocateprimaryaction.go index c948f0d5d4..fdb2514ed8 100644 --- a/typedapi/types/commandallocateprimaryaction.go +++ b/typedapi/types/commandallocateprimaryaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CommandAllocatePrimaryAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/reroute/types.ts#L78-L84 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/reroute/types.ts#L78-L84 type CommandAllocatePrimaryAction struct { // AcceptDataLoss If a node which has a copy of the data rejoins the cluster later on, that // data will be deleted. 
To ensure that these implications are well-understood, @@ -115,3 +115,13 @@ func NewCommandAllocatePrimaryAction() *CommandAllocatePrimaryAction { return r } + +// true + +type CommandAllocatePrimaryActionVariant interface { + CommandAllocatePrimaryActionCaster() *CommandAllocatePrimaryAction +} + +func (s *CommandAllocatePrimaryAction) CommandAllocatePrimaryActionCaster() *CommandAllocatePrimaryAction { + return s +} diff --git a/typedapi/types/commandallocatereplicaaction.go b/typedapi/types/commandallocatereplicaaction.go index 36ca54c4bf..9267b519f0 100644 --- a/typedapi/types/commandallocatereplicaaction.go +++ b/typedapi/types/commandallocatereplicaaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CommandAllocateReplicaAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/reroute/types.ts#L69-L76 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/reroute/types.ts#L69-L76 type CommandAllocateReplicaAction struct { Index string `json:"index"` Node string `json:"node"` @@ -97,3 +97,13 @@ func NewCommandAllocateReplicaAction() *CommandAllocateReplicaAction { return r } + +// true + +type CommandAllocateReplicaActionVariant interface { + CommandAllocateReplicaActionCaster() *CommandAllocateReplicaAction +} + +func (s *CommandAllocateReplicaAction) CommandAllocateReplicaActionCaster() *CommandAllocateReplicaAction { + return s +} diff --git a/typedapi/types/commandcancelaction.go b/typedapi/types/commandcancelaction.go index e6d4234415..36c6d1af59 100644 --- a/typedapi/types/commandcancelaction.go +++ b/typedapi/types/commandcancelaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CommandCancelAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/reroute/types.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/reroute/types.ts#L45-L50 type CommandCancelAction struct { AllowPrimary *bool `json:"allow_primary,omitempty"` Index string `json:"index"` @@ -112,3 +112,13 @@ func NewCommandCancelAction() *CommandCancelAction { return r } + +// true + +type CommandCancelActionVariant interface { + CommandCancelActionCaster() *CommandCancelAction +} + +func (s *CommandCancelAction) CommandCancelActionCaster() *CommandCancelAction { + return s +} diff --git a/typedapi/types/commandmoveaction.go b/typedapi/types/commandmoveaction.go index 8e1f65f251..66b0af247e 100644 --- a/typedapi/types/commandmoveaction.go +++ b/typedapi/types/commandmoveaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CommandMoveAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/reroute/types.ts#L60-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/reroute/types.ts#L60-L67 type CommandMoveAction struct { // FromNode The node to move the shard from FromNode string `json:"from_node"` @@ -112,3 +112,13 @@ func NewCommandMoveAction() *CommandMoveAction { return r } + +// true + +type CommandMoveActionVariant interface { + CommandMoveActionCaster() *CommandMoveAction +} + +func (s *CommandMoveAction) CommandMoveActionCaster() *CommandMoveAction { + return s +} diff --git a/typedapi/types/commoncatqueryparameters.go b/typedapi/types/commoncatqueryparameters.go deleted file mode 100644 index 9efc5506c9..0000000000 --- a/typedapi/types/commoncatqueryparameters.go +++ /dev/null @@ -1,175 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// CommonCatQueryParameters type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_spec_utils/behaviors.ts#L86-L132 -type CommonCatQueryParameters struct { - // Format Specifies the format to return the columnar data in, can be set to - // `text`, `json`, `cbor`, `yaml`, or `smile`. - Format *string `json:"format,omitempty"` - // H List of columns to appear in the response. Supports simple wildcards. - H []string `json:"h,omitempty"` - // Help When set to `true` will output available columns. This option - // can't be combined with any other query string option. - Help *bool `json:"help,omitempty"` - // Local If `true`, the request computes the list of selected nodes from the - // local cluster state. If `false` the list of selected nodes are computed - // from the cluster state of the master node. In both cases the coordinating - // node will send requests for further information to each selected node. - Local *bool `json:"local,omitempty"` - // MasterTimeout Period to wait for a connection to the master node. - MasterTimeout Duration `json:"master_timeout,omitempty"` - // S List of columns that determine how the table should be sorted. - // Sorting defaults to ascending and can be changed by setting `:asc` - // or `:desc` as a suffix to the column name. - S []string `json:"s,omitempty"` - // V When set to `true` will enable verbose output. 
- V *bool `json:"v,omitempty"` -} - -func (s *CommonCatQueryParameters) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "format": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Format", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Format = &o - - case "h": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(string) - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "H", err) - } - - s.H = append(s.H, *o) - } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.H); err != nil { - return fmt.Errorf("%s | %w", "H", err) - } - } - - case "help": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "Help", err) - } - s.Help = &value - case bool: - s.Help = &v - } - - case "local": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "Local", err) - } - s.Local = &value - case bool: - s.Local = &v - } - - case "master_timeout": - if err := dec.Decode(&s.MasterTimeout); err != nil { - return fmt.Errorf("%s | %w", "MasterTimeout", err) - } - - case "s": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(string) - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "S", err) - } - - s.S = append(s.S, *o) - } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.S); err != nil { - return fmt.Errorf("%s | %w", "S", err) - } - 
} - - case "v": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "V", err) - } - s.V = &value - case bool: - s.V = &v - } - - } - } - return nil -} - -// NewCommonCatQueryParameters returns a CommonCatQueryParameters. -func NewCommonCatQueryParameters() *CommonCatQueryParameters { - r := &CommonCatQueryParameters{} - - return r -} diff --git a/typedapi/types/commongramstokenfilter.go b/typedapi/types/commongramstokenfilter.go index 0d814b4f0b..211d197654 100644 --- a/typedapi/types/commongramstokenfilter.go +++ b/typedapi/types/commongramstokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CommonGramsTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L175-L181 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L174-L180 type CommonGramsTokenFilter struct { CommonWords []string `json:"common_words,omitempty"` CommonWordsPath *string `json:"common_words_path,omitempty"` @@ -139,3 +139,13 @@ func NewCommonGramsTokenFilter() *CommonGramsTokenFilter { return r } + +// true + +type CommonGramsTokenFilterVariant interface { + CommonGramsTokenFilterCaster() *CommonGramsTokenFilter +} + +func (s *CommonGramsTokenFilter) CommonGramsTokenFilterCaster() *CommonGramsTokenFilter { + return s +} diff --git a/typedapi/types/commonqueryparameters.go b/typedapi/types/commonqueryparameters.go deleted file mode 100644 index ff2be6a10e..0000000000 --- a/typedapi/types/commonqueryparameters.go +++ /dev/null @@ -1,137 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// CommonQueryParameters type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_spec_utils/behaviors.ts#L50-L84 -type CommonQueryParameters struct { - // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors - // when they occur. - ErrorTrace *bool `json:"error_trace,omitempty"` - // FilterPath Comma-separated list of filters in dot notation which reduce the response - // returned by Elasticsearch. - FilterPath []string `json:"filter_path,omitempty"` - // Human When set to `true` will return statistics in a format suitable for humans. - // For example `"exists_time": "1h"` for humans and - // `"eixsts_time_in_millis": 3600000` for computers. When disabled the human - // readable values will be omitted. This makes sense for responses being - // consumed - // only by machines. - Human *bool `json:"human,omitempty"` - // Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use - // this option for debugging only. 
- Pretty *bool `json:"pretty,omitempty"` -} - -func (s *CommonQueryParameters) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "error_trace": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "ErrorTrace", err) - } - s.ErrorTrace = &value - case bool: - s.ErrorTrace = &v - } - - case "filter_path": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(string) - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "FilterPath", err) - } - - s.FilterPath = append(s.FilterPath, *o) - } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.FilterPath); err != nil { - return fmt.Errorf("%s | %w", "FilterPath", err) - } - } - - case "human": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "Human", err) - } - s.Human = &value - case bool: - s.Human = &v - } - - case "pretty": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "Pretty", err) - } - s.Pretty = &value - case bool: - s.Pretty = &v - } - - } - } - return nil -} - -// NewCommonQueryParameters returns a CommonQueryParameters. -func NewCommonQueryParameters() *CommonQueryParameters { - r := &CommonQueryParameters{} - - return r -} diff --git a/typedapi/types/commontermsquery.go b/typedapi/types/commontermsquery.go index 21913d2dcc..56a75fea02 100644 --- a/typedapi/types/commontermsquery.go +++ b/typedapi/types/commontermsquery.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // CommonTermsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L34-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L34-L44 type CommonTermsQuery struct { Analyzer *string `json:"analyzer,omitempty"` // Boost Floating point number used to decrease or increase the relevance scores of @@ -171,3 +171,13 @@ func NewCommonTermsQuery() *CommonTermsQuery { return r } + +// true + +type CommonTermsQueryVariant interface { + CommonTermsQueryCaster() *CommonTermsQuery +} + +func (s *CommonTermsQuery) CommonTermsQueryCaster() *CommonTermsQuery { + return s +} diff --git a/typedapi/types/communityidprocessor.go b/typedapi/types/communityidprocessor.go new file mode 100644 index 0000000000..74d6df5577 --- /dev/null +++ b/typedapi/types/communityidprocessor.go @@ -0,0 +1,243 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CommunityIDProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L598-L659 +type CommunityIDProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // DestinationIp Field containing the destination IP address. + DestinationIp *string `json:"destination_ip,omitempty"` + // DestinationPort Field containing the destination port. + DestinationPort *string `json:"destination_port,omitempty"` + // IanaNumber Field containing the IANA number. + IanaNumber *string `json:"iana_number,omitempty"` + // IcmpCode Field containing the ICMP code. + IcmpCode *string `json:"icmp_code,omitempty"` + // IcmpType Field containing the ICMP type. + IcmpType *string `json:"icmp_type,omitempty"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If true and any required fields are missing, the processor quietly exits + // without modifying the document. 
+ IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Seed Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The + // seed can prevent hash collisions between network domains, such as a staging + // and production network that use the same addressing scheme. + Seed *int `json:"seed,omitempty"` + // SourceIp Field containing the source IP address. + SourceIp *string `json:"source_ip,omitempty"` + // SourcePort Field containing the source port. + SourcePort *string `json:"source_port,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField Output field for the community ID. + TargetField *string `json:"target_field,omitempty"` + // Transport Field containing the transport protocol name or number. Used only when the + // iana_number field is not present. The following protocol names are currently + // supported: eigrp, gre, icmp, icmpv6, igmp, ipv6-icmp, ospf, pim, sctp, tcp, + // udp + Transport *string `json:"transport,omitempty"` +} + +func (s *CommunityIDProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "destination_ip": + if err := dec.Decode(&s.DestinationIp); err != nil { + return fmt.Errorf("%s | %w", "DestinationIp", err) + } + + case "destination_port": + if err := dec.Decode(&s.DestinationPort); err != nil { + return fmt.Errorf("%s | %w", "DestinationPort", err) + } + + case "iana_number": + if 
err := dec.Decode(&s.IanaNumber); err != nil { + return fmt.Errorf("%s | %w", "IanaNumber", err) + } + + case "icmp_code": + if err := dec.Decode(&s.IcmpCode); err != nil { + return fmt.Errorf("%s | %w", "IcmpCode", err) + } + + case "icmp_type": + if err := dec.Decode(&s.IcmpType); err != nil { + return fmt.Errorf("%s | %w", "IcmpType", err) + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "seed": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Seed", err) + } + s.Seed = &value + case float64: + f := int(v) + s.Seed = &f + } + + case "source_ip": + if err := dec.Decode(&s.SourceIp); err != nil { + return fmt.Errorf("%s | %w", "SourceIp", err) + } + + case "source_port": + if err := dec.Decode(&s.SourcePort); err != nil { + return fmt.Errorf("%s | %w", "SourcePort", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + case "transport": + if err := dec.Decode(&s.Transport); err != nil { + return fmt.Errorf("%s | %w", "Transport", err) + } + + } + } + return nil +} + +// NewCommunityIDProcessor returns a CommunityIDProcessor. +func NewCommunityIDProcessor() *CommunityIDProcessor { + r := &CommunityIDProcessor{} + + return r +} + +// true + +type CommunityIDProcessorVariant interface { + CommunityIDProcessorCaster() *CommunityIDProcessor +} + +func (s *CommunityIDProcessor) CommunityIDProcessorCaster() *CommunityIDProcessor { + return s +} diff --git a/typedapi/types/compactnodeinfo.go b/typedapi/types/compactnodeinfo.go index 160f43317f..7c629882fb 100644 --- a/typedapi/types/compactnodeinfo.go +++ b/typedapi/types/compactnodeinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // CompactNodeInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L27-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L27-L29 type CompactNodeInfo struct { Name string `json:"name"` } @@ -66,3 +66,5 @@ func NewCompactNodeInfo() *CompactNodeInfo { return r } + +// false diff --git a/typedapi/types/completioncontext.go b/typedapi/types/completioncontext.go index eb6a03a4a4..5180131be0 100644 --- a/typedapi/types/completioncontext.go +++ b/typedapi/types/completioncontext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CompletionContext type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L235-L264 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L235-L264 type CompletionContext struct { // Boost The factor by which the score of the suggestion should be boosted. // The score is computed by multiplying the boost with the suggestion weight. 
@@ -129,3 +129,13 @@ func NewCompletionContext() *CompletionContext { return r } + +// true + +type CompletionContextVariant interface { + CompletionContextCaster() *CompletionContext +} + +func (s *CompletionContext) CompletionContextCaster() *CompletionContext { + return s +} diff --git a/typedapi/types/completionproperty.go b/typedapi/types/completionproperty.go index 20ef376d1e..2100a0e52c 100644 --- a/typedapi/types/completionproperty.go +++ b/typedapi/types/completionproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // CompletionProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/specialized.ts#L33-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/specialized.ts#L33-L41 type CompletionProperty struct { Analyzer *string `json:"analyzer,omitempty"` Contexts []SuggestContext `json:"contexts,omitempty"` @@ -44,14 +45,14 @@ type CompletionProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` MaxInputLength *int `json:"max_input_length,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - PreservePositionIncrements *bool `json:"preserve_position_increments,omitempty"` - PreserveSeparators *bool `json:"preserve_separators,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - SearchAnalyzer *string `json:"search_analyzer,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + PreservePositionIncrements *bool `json:"preserve_position_increments,omitempty"` + PreserveSeparators *bool `json:"preserve_separators,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SearchAnalyzer *string `json:"search_analyzer,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *CompletionProperty) UnmarshalJSON(data []byte) error { @@ -140,301 +141,313 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := 
NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": 
oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := 
NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -527,301 +540,313 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = 
oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { 
- return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -839,18 +864,6 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { } s.SearchAnalyzer = &o - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -865,6 +878,11 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -892,8 +910,8 @@ func (s CompletionProperty) MarshalJSON() ([]byte, error) { PreserveSeparators: s.PreserveSeparators, Properties: s.Properties, 
SearchAnalyzer: s.SearchAnalyzer, - Similarity: s.Similarity, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, } @@ -905,10 +923,20 @@ func (s CompletionProperty) MarshalJSON() ([]byte, error) { // NewCompletionProperty returns a CompletionProperty. func NewCompletionProperty() *CompletionProperty { r := &CompletionProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type CompletionPropertyVariant interface { + CompletionPropertyCaster() *CompletionProperty +} + +func (s *CompletionProperty) CompletionPropertyCaster() *CompletionProperty { + return s +} diff --git a/typedapi/types/completionresult.go b/typedapi/types/completionresult.go index 11e7e1c292..4afcdceddc 100644 --- a/typedapi/types/completionresult.go +++ b/typedapi/types/completionresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CompletionResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/_types/Results.ts#L60-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/_types/Results.ts#L76-L81 type CompletionResult struct { Result string `json:"result"` } @@ -74,3 +74,5 @@ func NewCompletionResult() *CompletionResult { return r } + +// false diff --git a/typedapi/types/completionstats.go b/typedapi/types/completionstats.go index 178d74a935..4bc7bc34ff 100644 --- a/typedapi/types/completionstats.go +++ b/typedapi/types/completionstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CompletionStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L80-L90 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L83-L93 type CompletionStats struct { Fields map[string]FieldSizeUsage `json:"fields,omitempty"` // Size Total amount of memory used for completion across all shards assigned to @@ -93,8 +93,10 @@ func (s *CompletionStats) UnmarshalJSON(data []byte) error { // NewCompletionStats returns a CompletionStats. 
func NewCompletionStats() *CompletionStats { r := &CompletionStats{ - Fields: make(map[string]FieldSizeUsage, 0), + Fields: make(map[string]FieldSizeUsage), } return r } + +// false diff --git a/typedapi/types/completionsuggest.go b/typedapi/types/completionsuggest.go index e832422cbb..b43ed1ae28 100644 --- a/typedapi/types/completionsuggest.go +++ b/typedapi/types/completionsuggest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CompletionSuggest type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L48-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L48-L55 type CompletionSuggest struct { Length int `json:"length"` Offset int `json:"offset"` @@ -125,3 +125,5 @@ func NewCompletionSuggest() *CompletionSuggest { return r } + +// false diff --git a/typedapi/types/completionsuggester.go b/typedapi/types/completionsuggester.go index 434cf876b5..a17f814dbc 100644 --- a/typedapi/types/completionsuggester.go +++ b/typedapi/types/completionsuggester.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CompletionSuggester type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L163-L181 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L163-L181 type CompletionSuggester struct { // Analyzer The analyzer to analyze the suggest text with. // Defaults to the search analyzer of the suggest field. @@ -158,8 +158,18 @@ func (s *CompletionSuggester) UnmarshalJSON(data []byte) error { // NewCompletionSuggester returns a CompletionSuggester. func NewCompletionSuggester() *CompletionSuggester { r := &CompletionSuggester{ - Contexts: make(map[string][]CompletionContext, 0), + Contexts: make(map[string][]CompletionContext), } return r } + +// true + +type CompletionSuggesterVariant interface { + CompletionSuggesterCaster() *CompletionSuggester +} + +func (s *CompletionSuggester) CompletionSuggesterCaster() *CompletionSuggester { + return s +} diff --git a/typedapi/types/completionsuggestoption.go b/typedapi/types/completionsuggestoption.go index e6caaa7a29..bb36249706 100644 --- a/typedapi/types/completionsuggestoption.go +++ b/typedapi/types/completionsuggestoption.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CompletionSuggestOption type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L73-L84 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L73-L84 type CompletionSuggestOption struct { CollateMatch *bool `json:"collate_match,omitempty"` Contexts map[string][]Context `json:"contexts,omitempty"` @@ -169,9 +169,11 @@ func (s *CompletionSuggestOption) UnmarshalJSON(data []byte) error { // NewCompletionSuggestOption returns a CompletionSuggestOption. func NewCompletionSuggestOption() *CompletionSuggestOption { r := &CompletionSuggestOption{ - Contexts: make(map[string][]Context, 0), - Fields: make(map[string]json.RawMessage, 0), + Contexts: make(map[string][]Context), + Fields: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/completiontool.go b/typedapi/types/completiontool.go new file mode 100644 index 0000000000..2ce94a995d --- /dev/null +++ b/typedapi/types/completiontool.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionTool type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/chat_completion_unified/UnifiedRequest.ts#L215-L227 +type CompletionTool struct { + // Function The function definition. + Function CompletionToolFunction `json:"function"` + // Type The type of tool. + Type string `json:"type"` +} + +func (s *CompletionTool) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "function": + if err := dec.Decode(&s.Function); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewCompletionTool returns a CompletionTool. +func NewCompletionTool() *CompletionTool { + r := &CompletionTool{} + + return r +} + +// true + +type CompletionToolVariant interface { + CompletionToolCaster() *CompletionTool +} + +func (s *CompletionTool) CompletionToolCaster() *CompletionTool { + return s +} diff --git a/typedapi/types/completiontoolchoice.go b/typedapi/types/completiontoolchoice.go new file mode 100644 index 0000000000..3f08a79dfa --- /dev/null +++ b/typedapi/types/completiontoolchoice.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionToolChoice type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/chat_completion_unified/UnifiedRequest.ts#L178-L190 +type CompletionToolChoice struct { + // Function The tool choice function. + Function CompletionToolChoiceFunction `json:"function"` + // Type The type of the tool. + Type string `json:"type"` +} + +func (s *CompletionToolChoice) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "function": + if err := dec.Decode(&s.Function); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewCompletionToolChoice returns a CompletionToolChoice. 
+func NewCompletionToolChoice() *CompletionToolChoice { + r := &CompletionToolChoice{} + + return r +} + +// true + +type CompletionToolChoiceVariant interface { + CompletionToolChoiceCaster() *CompletionToolChoice +} + +func (s *CompletionToolChoice) CompletionToolChoiceCaster() *CompletionToolChoice { + return s +} diff --git a/typedapi/types/completiontoolchoicefunction.go b/typedapi/types/completiontoolchoicefunction.go new file mode 100644 index 0000000000..5dc7f4f904 --- /dev/null +++ b/typedapi/types/completiontoolchoicefunction.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionToolChoiceFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/chat_completion_unified/UnifiedRequest.ts#L167-L176 +type CompletionToolChoiceFunction struct { + // Name The name of the function to call. 
+ Name string `json:"name"` +} + +func (s *CompletionToolChoiceFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + } + } + return nil +} + +// NewCompletionToolChoiceFunction returns a CompletionToolChoiceFunction. +func NewCompletionToolChoiceFunction() *CompletionToolChoiceFunction { + r := &CompletionToolChoiceFunction{} + + return r +} + +// true + +type CompletionToolChoiceFunctionVariant interface { + CompletionToolChoiceFunctionCaster() *CompletionToolChoiceFunction +} + +func (s *CompletionToolChoiceFunction) CompletionToolChoiceFunctionCaster() *CompletionToolChoiceFunction { + return s +} diff --git a/typedapi/types/completiontoolfunction.go b/typedapi/types/completiontoolfunction.go new file mode 100644 index 0000000000..8624d6bda5 --- /dev/null +++ b/typedapi/types/completiontoolfunction.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionToolFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/chat_completion_unified/UnifiedRequest.ts#L192-L213 +type CompletionToolFunction struct { + // Description A description of what the function does. + // This is used by the model to choose when and how to call the function. + Description *string `json:"description,omitempty"` + // Name The name of the function. + Name string `json:"name"` + // Parameters The parameters the functional accepts. This should be formatted as a JSON + // object. + Parameters json.RawMessage `json:"parameters,omitempty"` + // Strict Whether to enable schema adherence when generating the function call. 
+ Strict *bool `json:"strict,omitempty"` +} + +func (s *CompletionToolFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "parameters": + if err := dec.Decode(&s.Parameters); err != nil { + return fmt.Errorf("%s | %w", "Parameters", err) + } + + case "strict": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Strict", err) + } + s.Strict = &value + case bool: + s.Strict = &v + } + + } + } + return nil +} + +// NewCompletionToolFunction returns a CompletionToolFunction. +func NewCompletionToolFunction() *CompletionToolFunction { + r := &CompletionToolFunction{} + + return r +} + +// true + +type CompletionToolFunctionVariant interface { + CompletionToolFunctionCaster() *CompletionToolFunction +} + +func (s *CompletionToolFunction) CompletionToolFunctionCaster() *CompletionToolFunction { + return s +} diff --git a/typedapi/types/completiontooltype.go b/typedapi/types/completiontooltype.go new file mode 100644 index 0000000000..036ff06a20 --- /dev/null +++ b/typedapi/types/completiontooltype.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +// CompletionToolType holds the union for the following types: +// +// string +// CompletionToolChoice +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/chat_completion_unified/UnifiedRequest.ts#L89-L92 +type CompletionToolType any + +type CompletionToolTypeVariant interface { + CompletionToolTypeCaster() *CompletionToolType +} diff --git a/typedapi/types/componenttemplatenode.go b/typedapi/types/componenttemplatenode.go index adcaec6ec9..9aef8ba495 100644 --- a/typedapi/types/componenttemplatenode.go +++ b/typedapi/types/componenttemplatenode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,15 +26,17 @@ import ( "errors" "fmt" "io" + "strconv" ) // ComponentTemplateNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/_types/ComponentTemplate.ts#L35-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/_types/ComponentTemplate.ts#L32-L41 type ComponentTemplateNode struct { - Meta_ Metadata `json:"_meta,omitempty"` - Template ComponentTemplateSummary `json:"template"` - Version *int64 `json:"version,omitempty"` + Deprecated *bool `json:"deprecated,omitempty"` + Meta_ Metadata `json:"_meta,omitempty"` + Template ComponentTemplateSummary `json:"template"` + Version *int64 `json:"version,omitempty"` } func (s *ComponentTemplateNode) UnmarshalJSON(data []byte) error { @@ -52,6 +54,20 @@ func (s *ComponentTemplateNode) UnmarshalJSON(data []byte) error { switch t { + case "deprecated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deprecated", err) + } + s.Deprecated = &value + case bool: + s.Deprecated = &v + } + case "_meta": if err := dec.Decode(&s.Meta_); err != nil { return fmt.Errorf("%s | %w", "Meta_", err) @@ -78,3 +94,13 @@ func NewComponentTemplateNode() *ComponentTemplateNode { return r } + +// true + +type ComponentTemplateNodeVariant interface { + ComponentTemplateNodeCaster() *ComponentTemplateNode +} + +func (s *ComponentTemplateNode) ComponentTemplateNodeCaster() *ComponentTemplateNode { + return s +} diff --git a/typedapi/types/componenttemplatesummary.go b/typedapi/types/componenttemplatesummary.go index ff8baa5eab..d044801dbe 100644 --- a/typedapi/types/componenttemplatesummary.go +++ b/typedapi/types/componenttemplatesummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ComponentTemplateSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/_types/ComponentTemplate.ts#L42-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/_types/ComponentTemplate.ts#L43-L55 type ComponentTemplateSummary struct { Aliases map[string]AliasDefinition `json:"aliases,omitempty"` Lifecycle *DataStreamLifecycleWithRollover `json:"lifecycle,omitempty"` @@ -99,9 +99,19 @@ func (s *ComponentTemplateSummary) UnmarshalJSON(data []byte) error { // NewComponentTemplateSummary returns a ComponentTemplateSummary. func NewComponentTemplateSummary() *ComponentTemplateSummary { r := &ComponentTemplateSummary{ - Aliases: make(map[string]AliasDefinition, 0), - Settings: make(map[string]IndexSettings, 0), + Aliases: make(map[string]AliasDefinition), + Settings: make(map[string]IndexSettings), } return r } + +// true + +type ComponentTemplateSummaryVariant interface { + ComponentTemplateSummaryCaster() *ComponentTemplateSummary +} + +func (s *ComponentTemplateSummary) ComponentTemplateSummaryCaster() *ComponentTemplateSummary { + return s +} diff --git a/typedapi/types/compositeaggregate.go b/typedapi/types/compositeaggregate.go index b99ea3bdb2..80e6ca0288 100644 --- a/typedapi/types/compositeaggregate.go +++ b/typedapi/types/compositeaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // CompositeAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L622-L627 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L698-L703 type CompositeAggregate struct { AfterKey CompositeAggregateKey `json:"after_key,omitempty"` Buckets BucketsCompositeBucket `json:"buckets"` @@ -94,3 +94,5 @@ func NewCompositeAggregate() *CompositeAggregate { return r } + +// false diff --git a/typedapi/types/compositeaggregatekey.go b/typedapi/types/compositeaggregatekey.go index e7647b254a..b2c329f201 100644 --- a/typedapi/types/compositeaggregatekey.go +++ b/typedapi/types/compositeaggregatekey.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // CompositeAggregateKey type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L120-L120 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L128-L128 type CompositeAggregateKey map[string]FieldValue + +type CompositeAggregateKeyVariant interface { + CompositeAggregateKeyCaster() *CompositeAggregateKey +} diff --git a/typedapi/types/compositeaggregation.go b/typedapi/types/compositeaggregation.go index dfb5844a20..fa6d9d0143 100644 --- a/typedapi/types/compositeaggregation.go +++ b/typedapi/types/compositeaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CompositeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L122-L138 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L130-L149 type CompositeAggregation struct { // After When paginating, use the `after_key` value returned in the previous response // to retrieve the next page. 
@@ -95,3 +95,13 @@ func NewCompositeAggregation() *CompositeAggregation { return r } + +// true + +type CompositeAggregationVariant interface { + CompositeAggregationCaster() *CompositeAggregation +} + +func (s *CompositeAggregation) CompositeAggregationCaster() *CompositeAggregation { + return s +} diff --git a/typedapi/types/compositeaggregationsource.go b/typedapi/types/compositeaggregationsource.go index e74a39bfb3..376f2d2539 100644 --- a/typedapi/types/compositeaggregationsource.go +++ b/typedapi/types/compositeaggregationsource.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // CompositeAggregationSource type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L140-L157 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L151-L168 type CompositeAggregationSource struct { // DateHistogram A date histogram aggregation. DateHistogram *CompositeDateHistogramAggregation `json:"date_histogram,omitempty"` @@ -40,3 +40,13 @@ func NewCompositeAggregationSource() *CompositeAggregationSource { return r } + +// true + +type CompositeAggregationSourceVariant interface { + CompositeAggregationSourceCaster() *CompositeAggregationSource +} + +func (s *CompositeAggregationSource) CompositeAggregationSourceCaster() *CompositeAggregationSource { + return s +} diff --git a/typedapi/types/compositebucket.go b/typedapi/types/compositebucket.go index f7440a3ade..66ed519980 100644 --- a/typedapi/types/compositebucket.go +++ b/typedapi/types/compositebucket.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // CompositeBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L629-L631 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L705-L707 type CompositeBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -491,6 +491,13 @@ func (s *CompositeBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -624,8 +631,10 @@ func (s CompositeBucket) MarshalJSON() ([]byte, error) { // NewCompositeBucket returns a CompositeBucket. func NewCompositeBucket() *CompositeBucket { r := &CompositeBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/compositedatehistogramaggregation.go b/typedapi/types/compositedatehistogramaggregation.go index 53c6e3d3e4..0888def1a5 100644 --- a/typedapi/types/compositedatehistogramaggregation.go +++ b/typedapi/types/compositedatehistogramaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -35,7 +35,7 @@ import ( // CompositeDateHistogramAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L176-L184 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L187-L195 type CompositeDateHistogramAggregation struct { // CalendarInterval Either `calendar_interval` or `fixed_interval` must be present CalendarInterval *string `json:"calendar_interval,omitempty"` @@ -151,3 +151,13 @@ func NewCompositeDateHistogramAggregation() *CompositeDateHistogramAggregation { return r } + +// true + +type CompositeDateHistogramAggregationVariant interface { + CompositeDateHistogramAggregationCaster() *CompositeDateHistogramAggregation +} + +func (s *CompositeDateHistogramAggregation) CompositeDateHistogramAggregationCaster() *CompositeDateHistogramAggregation { + return s +} diff --git a/typedapi/types/compositegeotilegridaggregation.go b/typedapi/types/compositegeotilegridaggregation.go index da2a7be1d7..37204f548b 100644 --- a/typedapi/types/compositegeotilegridaggregation.go +++ b/typedapi/types/compositegeotilegridaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -35,7 +35,7 @@ import ( // CompositeGeoTileGridAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L186-L189 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L197-L200 type CompositeGeoTileGridAggregation struct { Bounds GeoBounds `json:"bounds,omitempty"` // Field Either `field` or `script` must be present @@ -193,3 +193,13 @@ func NewCompositeGeoTileGridAggregation() *CompositeGeoTileGridAggregation { return r } + +// true + +type CompositeGeoTileGridAggregationVariant interface { + CompositeGeoTileGridAggregationCaster() *CompositeGeoTileGridAggregation +} + +func (s *CompositeGeoTileGridAggregation) CompositeGeoTileGridAggregationCaster() *CompositeGeoTileGridAggregation { + return s +} diff --git a/typedapi/types/compositehistogramaggregation.go b/typedapi/types/compositehistogramaggregation.go index 90db1da0d4..32115872fc 100644 --- a/typedapi/types/compositehistogramaggregation.go +++ b/typedapi/types/compositehistogramaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -35,7 +35,7 @@ import ( // CompositeHistogramAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L172-L174 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L183-L185 type CompositeHistogramAggregation struct { // Field Either `field` or `script` must be present Field *string `json:"field,omitempty"` @@ -129,3 +129,13 @@ func NewCompositeHistogramAggregation() *CompositeHistogramAggregation { return r } + +// true + +type CompositeHistogramAggregationVariant interface { + CompositeHistogramAggregationCaster() *CompositeHistogramAggregation +} + +func (s *CompositeHistogramAggregation) CompositeHistogramAggregationCaster() *CompositeHistogramAggregation { + return s +} diff --git a/typedapi/types/compositesubfield.go b/typedapi/types/compositesubfield.go new file mode 100644 index 0000000000..7af0126617 --- /dev/null +++ b/typedapi/types/compositesubfield.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/runtimefieldtype" +) + +// CompositeSubField type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/RuntimeFields.ts#L52-L54 +type CompositeSubField struct { + Type runtimefieldtype.RuntimeFieldType `json:"type"` +} + +// NewCompositeSubField returns a CompositeSubField. +func NewCompositeSubField() *CompositeSubField { + r := &CompositeSubField{} + + return r +} + +// true + +type CompositeSubFieldVariant interface { + CompositeSubFieldCaster() *CompositeSubField +} + +func (s *CompositeSubField) CompositeSubFieldCaster() *CompositeSubField { + return s +} diff --git a/typedapi/types/compositetermsaggregation.go b/typedapi/types/compositetermsaggregation.go index e3e9532408..b6c72b163c 100644 --- a/typedapi/types/compositetermsaggregation.go +++ b/typedapi/types/compositetermsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -35,7 +35,7 @@ import ( // CompositeTermsAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L170-L170 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L181-L181 type CompositeTermsAggregation struct { // Field Either `field` or `script` must be present Field *string `json:"field,omitempty"` @@ -112,3 +112,13 @@ func NewCompositeTermsAggregation() *CompositeTermsAggregation { return r } + +// true + +type CompositeTermsAggregationVariant interface { + CompositeTermsAggregationCaster() *CompositeTermsAggregation +} + +func (s *CompositeTermsAggregation) CompositeTermsAggregationCaster() *CompositeTermsAggregation { + return s +} diff --git a/typedapi/types/conditiontokenfilter.go b/typedapi/types/conditiontokenfilter.go index 3988f9be61..5f40c83bf1 100644 --- a/typedapi/types/conditiontokenfilter.go +++ b/typedapi/types/conditiontokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ConditionTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L183-L187 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L182-L186 type ConditionTokenFilter struct { Filter []string `json:"filter"` Script Script `json:"script"` @@ -99,3 +99,13 @@ func NewConditionTokenFilter() *ConditionTokenFilter { return r } + +// true + +type ConditionTokenFilterVariant interface { + ConditionTokenFilterCaster() *ConditionTokenFilter +} + +func (s *ConditionTokenFilter) ConditionTokenFilterCaster() *ConditionTokenFilter { + return s +} diff --git a/typedapi/types/configuration.go b/typedapi/types/configuration.go index e8eee95679..9091ed74ad 100644 --- a/typedapi/types/configuration.go +++ b/typedapi/types/configuration.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Configuration type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/_types/SnapshotLifecycle.ts#L99-L129 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/_types/SnapshotLifecycle.ts#L109-L139 type Configuration struct { // FeatureStates A list of feature states to be included in this snapshot. 
A list of features // available for inclusion in the snapshot and their descriptions be can be @@ -158,3 +158,13 @@ func NewConfiguration() *Configuration { return r } + +// true + +type ConfigurationVariant interface { + ConfigurationCaster() *Configuration +} + +func (s *Configuration) ConfigurationCaster() *Configuration { + return s +} diff --git a/typedapi/types/confusionmatrixitem.go b/typedapi/types/confusionmatrixitem.go index 627145ce5d..5f8aef7da4 100644 --- a/typedapi/types/confusionmatrixitem.go +++ b/typedapi/types/confusionmatrixitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ConfusionMatrixItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L125-L130 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L125-L130 type ConfusionMatrixItem struct { ActualClass string `json:"actual_class"` ActualClassDocCount int `json:"actual_class_doc_count"` @@ -107,3 +107,5 @@ func NewConfusionMatrixItem() *ConfusionMatrixItem { return r } + +// false diff --git a/typedapi/types/confusionmatrixprediction.go b/typedapi/types/confusionmatrixprediction.go index 9d41088022..50ffb76e9a 100644 --- a/typedapi/types/confusionmatrixprediction.go +++ b/typedapi/types/confusionmatrixprediction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ConfusionMatrixPrediction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L132-L135 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L132-L135 type ConfusionMatrixPrediction struct { Count int `json:"count"` PredictedClass string `json:"predicted_class"` @@ -84,3 +84,5 @@ func NewConfusionMatrixPrediction() *ConfusionMatrixPrediction { return r } + +// false diff --git a/typedapi/types/confusionmatrixthreshold.go b/typedapi/types/confusionmatrixthreshold.go index 00501fc320..54b45b6613 100644 --- a/typedapi/types/confusionmatrixthreshold.go +++ b/typedapi/types/confusionmatrixthreshold.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ConfusionMatrixThreshold type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L137-L158 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L137-L158 type ConfusionMatrixThreshold struct { // FalseNegative False Negative FalseNegative int `json:"fn"` @@ -133,3 +133,5 @@ func NewConfusionMatrixThreshold() *ConfusionMatrixThreshold { return r } + +// false diff --git a/typedapi/types/connection.go b/typedapi/types/connection.go index fa5e315871..23dbedc4b0 100644 --- a/typedapi/types/connection.go +++ b/typedapi/types/connection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Connection type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/graph/_types/Connection.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/graph/_types/Connection.ts#L22-L27 type Connection struct { DocCount int64 `json:"doc_count"` Source int64 `json:"source"` @@ -126,3 +126,5 @@ func NewConnection() *Connection { return r } + +// false diff --git a/typedapi/types/connector.go b/typedapi/types/connector.go index 333929a9a3..350b9b30ba 100644 --- a/typedapi/types/connector.go +++ b/typedapi/types/connector.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // Connector type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L237-L268 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L252-L283 type Connector struct { ApiKeyId *string `json:"api_key_id,omitempty"` ApiKeySecretId *string `json:"api_key_secret_id,omitempty"` @@ -345,3 +345,5 @@ func NewConnector() *Connector { return r } + +// false diff --git a/typedapi/types/connectorconfigproperties.go b/typedapi/types/connectorconfigproperties.go index 47d5e1b256..ee194b1273 100644 --- a/typedapi/types/connectorconfigproperties.go +++ b/typedapi/types/connectorconfigproperties.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // ConnectorConfigProperties type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L83-L99 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L83-L99 type ConnectorConfigProperties struct { Category *string `json:"category,omitempty"` DefaultValue ScalarValue `json:"default_value"` @@ -207,37 +207,37 @@ func (s *ConnectorConfigProperties) UnmarshalJSON(data []byte) error { case "less_than": o := NewLessThanValidation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "less_than", err) } s.Validations = append(s.Validations, *o) case "greater_than": o := NewGreaterThanValidation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "greater_than", err) } s.Validations = append(s.Validations, *o) case "list_type": o := NewListTypeValidation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "list_type", err) } s.Validations = append(s.Validations, *o) case "included_in": o := NewIncludedInValidation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "included_in", err) } s.Validations = append(s.Validations, *o) case "regex": o := NewRegexValidation() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "regex", err) } s.Validations = append(s.Validations, *o) default: o := new(any) if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("Validations | %w", err) } s.Validations = append(s.Validations, *o) } @@ -259,3 +259,13 @@ func NewConnectorConfigProperties() *ConnectorConfigProperties { return r } + +// true + +type ConnectorConfigPropertiesVariant interface { + ConnectorConfigPropertiesCaster() *ConnectorConfigProperties +} + +func (s *ConnectorConfigProperties) 
ConnectorConfigPropertiesCaster() *ConnectorConfigProperties { + return s +} diff --git a/typedapi/types/connectorconfiguration.go b/typedapi/types/connectorconfiguration.go index 34c23de760..2be732ee49 100644 --- a/typedapi/types/connectorconfiguration.go +++ b/typedapi/types/connectorconfiguration.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ConnectorConfiguration type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L101-L104 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L101-L104 type ConnectorConfiguration map[string]ConnectorConfigProperties + +type ConnectorConfigurationVariant interface { + ConnectorConfigurationCaster() *ConnectorConfiguration +} diff --git a/typedapi/types/connectorcustomscheduling.go b/typedapi/types/connectorcustomscheduling.go index 3dbbbb1c4a..6b7e284e25 100644 --- a/typedapi/types/connectorcustomscheduling.go +++ b/typedapi/types/connectorcustomscheduling.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ConnectorCustomScheduling type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L128-L128 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L128-L128 type ConnectorCustomScheduling map[string]CustomScheduling diff --git a/typedapi/types/connectorfeatures.go b/typedapi/types/connectorfeatures.go index 48c06c4ca2..29476d55ce 100644 --- a/typedapi/types/connectorfeatures.go +++ b/typedapi/types/connectorfeatures.go @@ -16,16 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ConnectorFeatures type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L224-L229 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L230-L244 type ConnectorFeatures struct { - DocumentLevelSecurity *FeatureEnabled `json:"document_level_security,omitempty"` - IncrementalSync *FeatureEnabled `json:"incremental_sync,omitempty"` + // DocumentLevelSecurity Indicates whether document-level security is enabled. + DocumentLevelSecurity *FeatureEnabled `json:"document_level_security,omitempty"` + // IncrementalSync Indicates whether incremental syncs are enabled. + IncrementalSync *FeatureEnabled `json:"incremental_sync,omitempty"` + // NativeConnectorApiKeys Indicates whether managed connector API keys are enabled. 
NativeConnectorApiKeys *FeatureEnabled `json:"native_connector_api_keys,omitempty"` SyncRules *SyncRulesFeature `json:"sync_rules,omitempty"` } @@ -36,3 +39,13 @@ func NewConnectorFeatures() *ConnectorFeatures { return r } + +// true + +type ConnectorFeaturesVariant interface { + ConnectorFeaturesCaster() *ConnectorFeatures +} + +func (s *ConnectorFeatures) ConnectorFeaturesCaster() *ConnectorFeatures { + return s +} diff --git a/typedapi/types/connectorscheduling.go b/typedapi/types/connectorscheduling.go index 8f21fedaca..d188cf4358 100644 --- a/typedapi/types/connectorscheduling.go +++ b/typedapi/types/connectorscheduling.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ConnectorScheduling type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L106-L110 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L106-L110 type ConnectorScheduling struct { Enabled bool `json:"enabled"` // Interval The interval is expressed using the crontab syntax @@ -90,3 +90,13 @@ func NewConnectorScheduling() *ConnectorScheduling { return r } + +// true + +type ConnectorSchedulingVariant interface { + ConnectorSchedulingCaster() *ConnectorScheduling +} + +func (s *ConnectorScheduling) ConnectorSchedulingCaster() *ConnectorScheduling { + return s +} diff --git a/typedapi/types/connectorsyncjob.go b/typedapi/types/connectorsyncjob.go index 261c60d64b..6fdca5ee1d 100644 --- a/typedapi/types/connectorsyncjob.go +++ b/typedapi/types/connectorsyncjob.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -35,7 +35,7 @@ import ( // ConnectorSyncJob type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/SyncJob.ts#L53-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/SyncJob.ts#L53-L72 type ConnectorSyncJob struct { CancelationRequestedAt DateTime `json:"cancelation_requested_at,omitempty"` CanceledAt DateTime `json:"canceled_at,omitempty"` @@ -227,8 +227,10 @@ func (s *ConnectorSyncJob) UnmarshalJSON(data []byte) error { // NewConnectorSyncJob returns a ConnectorSyncJob. 
func NewConnectorSyncJob() *ConnectorSyncJob { r := &ConnectorSyncJob{ - Metadata: make(map[string]json.RawMessage, 0), + Metadata: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/constantkeywordproperty.go b/typedapi/types/constantkeywordproperty.go index efd2691ad8..19ea4bdd54 100644 --- a/typedapi/types/constantkeywordproperty.go +++ b/typedapi/types/constantkeywordproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,20 +29,22 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // ConstantKeywordProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/specialized.ts#L50-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/specialized.ts#L50-L53 type ConstantKeywordProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` - Value json.RawMessage `json:"value,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` + Value json.RawMessage `json:"value,omitempty"` } func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { @@ -84,301 +86,313 @@ func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo 
case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -427,306 +441,323 @@ func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo 
:= NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -746,13 +777,14 @@ func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { func (s ConstantKeywordProperty) MarshalJSON() ([]byte, error) { type innerConstantKeywordProperty ConstantKeywordProperty tmp := innerConstantKeywordProperty{ - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Type: s.Type, - Value: s.Value, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + Value: s.Value, } tmp.Type = "constant_keyword" @@ -763,10 +795,20 @@ func (s ConstantKeywordProperty) MarshalJSON() ([]byte, error) { // NewConstantKeywordProperty returns a ConstantKeywordProperty. 
func NewConstantKeywordProperty() *ConstantKeywordProperty { r := &ConstantKeywordProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type ConstantKeywordPropertyVariant interface { + ConstantKeywordPropertyCaster() *ConstantKeywordProperty +} + +func (s *ConstantKeywordProperty) ConstantKeywordPropertyCaster() *ConstantKeywordProperty { + return s +} diff --git a/typedapi/types/constantscorequery.go b/typedapi/types/constantscorequery.go index d8af2ef08a..5059ee1f87 100644 --- a/typedapi/types/constantscorequery.go +++ b/typedapi/types/constantscorequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ConstantScoreQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L70-L77 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L76-L86 type ConstantScoreQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -43,7 +43,7 @@ type ConstantScoreQuery struct { // Filter queries do not calculate relevance scores. // To speed up performance, Elasticsearch automatically caches frequently used // filter queries. 
- Filter *Query `json:"filter,omitempty"` + Filter Query `json:"filter"` QueryName_ *string `json:"_name,omitempty"` } @@ -106,3 +106,13 @@ func NewConstantScoreQuery() *ConstantScoreQuery { return r } + +// true + +type ConstantScoreQueryVariant interface { + ConstantScoreQueryCaster() *ConstantScoreQuery +} + +func (s *ConstantScoreQuery) ConstantScoreQueryCaster() *ConstantScoreQuery { + return s +} diff --git a/typedapi/types/contentobject.go b/typedapi/types/contentobject.go new file mode 100644 index 0000000000..66b51840d3 --- /dev/null +++ b/typedapi/types/contentobject.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ContentObject type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/chat_completion_unified/UnifiedRequest.ts#L94-L106 +type ContentObject struct { + // Text The text content. 
+ Text string `json:"text"` + // Type The type of content. + Type string `json:"type"` +} + +func (s *ContentObject) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewContentObject returns a ContentObject. +func NewContentObject() *ContentObject { + r := &ContentObject{} + + return r +} + +// true + +type ContentObjectVariant interface { + ContentObjectCaster() *ContentObject +} + +func (s *ContentObject) ContentObjectCaster() *ContentObject { + return s +} diff --git a/typedapi/types/context.go b/typedapi/types/context.go index 021b4a28af..283c1e042b 100644 --- a/typedapi/types/context.go +++ b/typedapi/types/context.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // GeoLocation // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L228-L233 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L228-L233 type Context any + +type ContextVariant interface { + ContextCaster() *Context +} diff --git a/typedapi/types/contextmethod.go b/typedapi/types/contextmethod.go index 8fa7fe7e35..e3711f13e4 100644 --- a/typedapi/types/contextmethod.go +++ b/typedapi/types/contextmethod.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ContextMethod type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/get_script_context/types.ts#L27-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/get_script_context/types.ts#L27-L31 type ContextMethod struct { Name string `json:"name"` Params []ContextMethodParam `json:"params"` @@ -86,3 +86,5 @@ func NewContextMethod() *ContextMethod { return r } + +// false diff --git a/typedapi/types/contextmethodparam.go b/typedapi/types/contextmethodparam.go index 98051bff18..8d25dbb6c6 100644 --- a/typedapi/types/contextmethodparam.go +++ b/typedapi/types/contextmethodparam.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ContextMethodParam type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/get_script_context/types.ts#L33-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/get_script_context/types.ts#L33-L36 type ContextMethodParam struct { Name string `json:"name"` Type string `json:"type"` @@ -80,3 +80,5 @@ func NewContextMethodParam() *ContextMethodParam { return r } + +// false diff --git a/typedapi/types/convertprocessor.go b/typedapi/types/convertprocessor.go index 5c660d4375..91fe02bef3 100644 --- a/typedapi/types/convertprocessor.go +++ b/typedapi/types/convertprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // ConvertProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L445-L465 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L672-L692 type ConvertProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -169,3 +169,13 @@ func NewConvertProcessor() *ConvertProcessor { return r } + +// true + +type ConvertProcessorVariant interface { + ConvertProcessorCaster() *ConvertProcessor +} + +func (s *ConvertProcessor) ConvertProcessorCaster() *ConvertProcessor { + return s +} diff --git a/typedapi/types/coordinatorstats.go b/typedapi/types/coordinatorstats.go index ad82c6b23c..8898c2c0e1 100644 --- a/typedapi/types/coordinatorstats.go +++ b/typedapi/types/coordinatorstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CoordinatorStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/stats/types.ts#L29-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/stats/types.ts#L30-L36 type CoordinatorStats struct { ExecutedSearchesTotal int64 `json:"executed_searches_total"` NodeId string `json:"node_id"` @@ -133,3 +133,5 @@ func NewCoordinatorStats() *CoordinatorStats { return r } + +// false diff --git a/typedapi/types/coordsgeobounds.go b/typedapi/types/coordsgeobounds.go index 0d81bd7a85..55da6e12d7 100644 --- a/typedapi/types/coordsgeobounds.go +++ b/typedapi/types/coordsgeobounds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CoordsGeoBounds type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Geo.ts#L154-L159 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Geo.ts#L154-L159 type CoordsGeoBounds struct { Bottom Float64 `json:"bottom"` Left Float64 `json:"left"` @@ -129,3 +129,13 @@ func NewCoordsGeoBounds() *CoordsGeoBounds { return r } + +// true + +type CoordsGeoBoundsVariant interface { + CoordsGeoBoundsCaster() *CoordsGeoBounds +} + +func (s *CoordsGeoBounds) CoordsGeoBoundsCaster() *CoordsGeoBounds { + return s +} diff --git a/typedapi/types/coreknnquery.go b/typedapi/types/coreknnquery.go index 574a318c89..e1498f3275 100644 --- a/typedapi/types/coreknnquery.go +++ b/typedapi/types/coreknnquery.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CoreKnnQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/knn_search/_types/Knn.ts#L24-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/knn_search/_types/Knn.ts#L24-L33 type CoreKnnQuery struct { // Field The name of the vector field to search against Field string `json:"field"` @@ -111,3 +111,13 @@ func NewCoreKnnQuery() *CoreKnnQuery { return r } + +// true + +type CoreKnnQueryVariant interface { + CoreKnnQueryCaster() *CoreKnnQuery +} + +func (s *CoreKnnQuery) CoreKnnQueryCaster() *CoreKnnQuery { + return s +} diff --git a/typedapi/types/countedkeywordproperty.go b/typedapi/types/countedkeywordproperty.go new file mode 100644 index 0000000000..a5528b4768 --- /dev/null +++ b/typedapi/types/countedkeywordproperty.go @@ -0,0 +1,823 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +// CountedKeywordProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/specialized.ts#L55-L62 +type CountedKeywordProperty struct { + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *CountedKeywordProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", 
err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } 
+ s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + 
case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + 
value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := 
NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo 
:= NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return 
fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CountedKeywordProperty) MarshalJSON() ([]byte, error) { + type innerCountedKeywordProperty CountedKeywordProperty + tmp := innerCountedKeywordProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "counted_keyword" + + return json.Marshal(tmp) +} + +// NewCountedKeywordProperty returns a CountedKeywordProperty. +func NewCountedKeywordProperty() *CountedKeywordProperty { + r := &CountedKeywordProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +// true + +type CountedKeywordPropertyVariant interface { + CountedKeywordPropertyCaster() *CountedKeywordProperty +} + +func (s *CountedKeywordProperty) CountedKeywordPropertyCaster() *CountedKeywordProperty { + return s +} diff --git a/typedapi/types/counter.go b/typedapi/types/counter.go index 65f4278117..ef5238d3c9 100644 --- a/typedapi/types/counter.go +++ b/typedapi/types/counter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Counter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L35-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L32-L35 type Counter struct { Active int64 `json:"active"` Total int64 `json:"total"` @@ -93,3 +93,5 @@ func NewCounter() *Counter { return r } + +// false diff --git a/typedapi/types/countrecord.go b/typedapi/types/countrecord.go index 199c9cf113..32dc16942f 100644 --- a/typedapi/types/countrecord.go +++ b/typedapi/types/countrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CountRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/count/types.ts#L23-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/count/types.ts#L23-L39 type CountRecord struct { // Count the document count Count *string `json:"count,omitempty"` @@ -89,3 +89,5 @@ func NewCountRecord() *CountRecord { return r } + +// false diff --git a/typedapi/types/cpu.go b/typedapi/types/cpu.go index d6b162f634..862f083a0b 100644 --- a/typedapi/types/cpu.go +++ b/typedapi/types/cpu.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Cpu type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L539-L548 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L575-L584 type Cpu struct { LoadAverage map[string]Float64 `json:"load_average,omitempty"` Percent *int `json:"percent,omitempty"` @@ -120,8 +120,10 @@ func (s *Cpu) UnmarshalJSON(data []byte) error { // NewCpu returns a Cpu. func NewCpu() *Cpu { r := &Cpu{ - LoadAverage: make(map[string]Float64, 0), + LoadAverage: make(map[string]Float64), } return r } + +// false diff --git a/typedapi/types/cpuacct.go b/typedapi/types/cpuacct.go index 40c0c1a5f5..41e80cc787 100644 --- a/typedapi/types/cpuacct.go +++ b/typedapi/types/cpuacct.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CpuAcct type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L476-L485 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L512-L521 type CpuAcct struct { // ControlGroup The `cpuacct` control group to which the Elasticsearch process belongs. ControlGroup *string `json:"control_group,omitempty"` @@ -83,3 +83,5 @@ func NewCpuAcct() *CpuAcct { return r } + +// false diff --git a/typedapi/types/createdstatus.go b/typedapi/types/createdstatus.go index d5c9f88737..b22442dc15 100644 --- a/typedapi/types/createdstatus.go +++ b/typedapi/types/createdstatus.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CreatedStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/CreatedStatus.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/CreatedStatus.ts#L20-L22 type CreatedStatus struct { Created bool `json:"created"` } @@ -76,3 +76,5 @@ func NewCreatedStatus() *CreatedStatus { return r } + +// false diff --git a/typedapi/types/createfrom.go b/typedapi/types/createfrom.go new file mode 100644 index 0000000000..627a7e569f --- /dev/null +++ b/typedapi/types/createfrom.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CreateFrom type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/create_from/MigrateCreateFromRequest.ts#L46-L60 +type CreateFrom struct { + // MappingsOverride Mappings overrides to be applied to the destination index (optional) + MappingsOverride *TypeMapping `json:"mappings_override,omitempty"` + // RemoveIndexBlocks If index blocks should be removed when creating destination index (optional) + RemoveIndexBlocks *bool `json:"remove_index_blocks,omitempty"` + // SettingsOverride Settings overrides to be applied to the destination index (optional) + SettingsOverride *IndexSettings `json:"settings_override,omitempty"` +} + +func (s *CreateFrom) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mappings_override": + if err := dec.Decode(&s.MappingsOverride); err != nil { + return fmt.Errorf("%s | %w", "MappingsOverride", err) + } + + case "remove_index_blocks": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RemoveIndexBlocks", err) + } + s.RemoveIndexBlocks = &value + case bool: + s.RemoveIndexBlocks = &v + } + + case "settings_override": + if err := dec.Decode(&s.SettingsOverride); err != nil { + return fmt.Errorf("%s | %w", "SettingsOverride", err) + } + + } + } + return nil +} + +// NewCreateFrom returns a CreateFrom. 
+func NewCreateFrom() *CreateFrom { + r := &CreateFrom{} + + return r +} + +// true + +type CreateFromVariant interface { + CreateFromCaster() *CreateFrom +} + +func (s *CreateFrom) CreateFromCaster() *CreateFrom { + return s +} diff --git a/typedapi/types/createoperation.go b/typedapi/types/createoperation.go index 0f1e094701..b1be1e037f 100644 --- a/typedapi/types/createoperation.go +++ b/typedapi/types/createoperation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,30 +33,30 @@ import ( // CreateOperation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/bulk/types.ts#L130-L130 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/bulk/types.ts#L140-L140 type CreateOperation struct { // DynamicTemplates A map from the full name of fields to the name of dynamic templates. - // Defaults to an empty map. - // If a name matches a dynamic template, then that template will be applied + // It defaults to an empty map. + // If a name matches a dynamic template, that template will be applied // regardless of other match predicates defined in the template. - // If a field is already defined in the mapping, then this parameter won’t be + // If a field is already defined in the mapping, then this parameter won't be // used. DynamicTemplates map[string]string `json:"dynamic_templates,omitempty"` // Id_ The document ID. Id_ *string `json:"_id,omitempty"` IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` IfSeqNo *int64 `json:"if_seq_no,omitempty"` - // Index_ Name of the index or index alias to perform the action on. 
+ // Index_ The name of the index or index alias to perform the action on. Index_ *string `json:"_index,omitempty"` - // Pipeline ID of the pipeline to use to preprocess incoming documents. - // If the index has a default ingest pipeline specified, then setting the value - // to `_none` disables the default ingest pipeline for this request. - // If a final pipeline is configured it will always run, regardless of the value + // Pipeline The ID of the pipeline to use to preprocess incoming documents. + // If the index has a default ingest pipeline specified, setting the value to + // `_none` turns off the default ingest pipeline for this request. + // If a final pipeline is configured, it will always run regardless of the value // of this parameter. Pipeline *string `json:"pipeline,omitempty"` - // RequireAlias If `true`, the request’s actions must target an index alias. + // RequireAlias If `true`, the request's actions must target an index alias. RequireAlias *bool `json:"require_alias,omitempty"` - // Routing Custom value used to route operations to a specific shard. + // Routing A custom value used to route operations to a specific shard. Routing *string `json:"routing,omitempty"` Version *int64 `json:"version,omitempty"` VersionType *versiontype.VersionType `json:"version_type,omitempty"` @@ -164,8 +164,18 @@ func (s *CreateOperation) UnmarshalJSON(data []byte) error { // NewCreateOperation returns a CreateOperation. 
func NewCreateOperation() *CreateOperation { r := &CreateOperation{ - DynamicTemplates: make(map[string]string, 0), + DynamicTemplates: make(map[string]string), } return r } + +// true + +type CreateOperationVariant interface { + CreateOperationCaster() *CreateOperation +} + +func (s *CreateOperation) CreateOperationCaster() *CreateOperation { + return s +} diff --git a/typedapi/types/csvprocessor.go b/typedapi/types/csvprocessor.go index 411c75bc22..9792ed4544 100644 --- a/typedapi/types/csvprocessor.go +++ b/typedapi/types/csvprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CsvProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L467-L500 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L694-L727 type CsvProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -224,3 +224,13 @@ func NewCsvProcessor() *CsvProcessor { return r } + +// true + +type CsvProcessorVariant interface { + CsvProcessorCaster() *CsvProcessor +} + +func (s *CsvProcessor) CsvProcessorCaster() *CsvProcessor { + return s +} diff --git a/typedapi/types/cumulativecardinalityaggregate.go b/typedapi/types/cumulativecardinalityaggregate.go index d86ebf103b..7cd634c684 100644 --- a/typedapi/types/cumulativecardinalityaggregate.go +++ b/typedapi/types/cumulativecardinalityaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CumulativeCardinalityAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L754-L762 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L856-L864 type CumulativeCardinalityAggregate struct { Meta Metadata `json:"meta,omitempty"` Value int64 `json:"value"` @@ -96,3 +96,5 @@ func NewCumulativeCardinalityAggregate() *CumulativeCardinalityAggregate { return r } + +// false diff --git a/typedapi/types/cumulativecardinalityaggregation.go b/typedapi/types/cumulativecardinalityaggregation.go index 9c2ff53d5f..c6d86a0db5 100644 --- a/typedapi/types/cumulativecardinalityaggregation.go +++ b/typedapi/types/cumulativecardinalityaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // CumulativeCardinalityAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L192-L192 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L206-L209 type CumulativeCardinalityAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewCumulativeCardinalityAggregation() *CumulativeCardinalityAggregation { return r } + +// true + +type CumulativeCardinalityAggregationVariant interface { + CumulativeCardinalityAggregationCaster() *CumulativeCardinalityAggregation +} + +func (s *CumulativeCardinalityAggregation) CumulativeCardinalityAggregationCaster() *CumulativeCardinalityAggregation { + return s +} diff --git a/typedapi/types/cumulativesumaggregation.go b/typedapi/types/cumulativesumaggregation.go index bbc4f92eed..d71ec88bc9 100644 --- a/typedapi/types/cumulativesumaggregation.go +++ b/typedapi/types/cumulativesumaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // CumulativeSumAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L194-L194 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L211-L214 type CumulativeSumAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewCumulativeSumAggregation() *CumulativeSumAggregation { return r } + +// true + +type CumulativeSumAggregationVariant interface { + CumulativeSumAggregationCaster() *CumulativeSumAggregation +} + +func (s *CumulativeSumAggregation) CumulativeSumAggregationCaster() *CumulativeSumAggregation { + return s +} diff --git a/typedapi/types/currentnode.go b/typedapi/types/currentnode.go index 422bef6547..70dc422c8c 100644 --- a/typedapi/types/currentnode.go +++ b/typedapi/types/currentnode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -27,17 +27,20 @@ import ( "fmt" "io" "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole" ) // CurrentNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/types.ts#L78-L84 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/types.ts#L79-L90 type CurrentNode struct { - Attributes map[string]string `json:"attributes"` - Id string `json:"id"` - Name string `json:"name"` - TransportAddress string `json:"transport_address"` - WeightRanking int `json:"weight_ranking"` + Attributes map[string]string `json:"attributes"` + Id string `json:"id"` + Name string `json:"name"` + Roles []noderole.NodeRole `json:"roles"` + TransportAddress string `json:"transport_address"` + WeightRanking int `json:"weight_ranking"` } func (s *CurrentNode) UnmarshalJSON(data []byte) error { @@ -73,6 +76,11 @@ func (s *CurrentNode) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Name", err) } + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + case "transport_address": if err := dec.Decode(&s.TransportAddress); err != nil { return fmt.Errorf("%s | %w", "TransportAddress", err) @@ -102,8 +110,10 @@ func (s *CurrentNode) UnmarshalJSON(data []byte) error { // NewCurrentNode returns a CurrentNode. func NewCurrentNode() *CurrentNode { r := &CurrentNode{ - Attributes: make(map[string]string, 0), + Attributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/customanalyzer.go b/typedapi/types/customanalyzer.go index bc27355748..13bc251b73 100644 --- a/typedapi/types/customanalyzer.go +++ b/typedapi/types/customanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CustomAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/analyzers.ts#L28-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L28-L35 type CustomAnalyzer struct { CharFilter []string `json:"char_filter,omitempty"` Filter []string `json:"filter,omitempty"` @@ -57,13 +57,35 @@ func (s *CustomAnalyzer) UnmarshalJSON(data []byte) error { switch t { case "char_filter": - if err := dec.Decode(&s.CharFilter); err != nil { - return fmt.Errorf("%s | %w", "CharFilter", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CharFilter", err) + } + + s.CharFilter = append(s.CharFilter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CharFilter); err != nil { + return fmt.Errorf("%s | %w", "CharFilter", err) + } } case "filter": - if err := dec.Decode(&s.Filter); err != nil { - return fmt.Errorf("%s | %w", "Filter", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } } case "position_increment_gap": @@ -143,3 +165,13 @@ func NewCustomAnalyzer() 
*CustomAnalyzer { return r } + +// true + +type CustomAnalyzerVariant interface { + CustomAnalyzerCaster() *CustomAnalyzer +} + +func (s *CustomAnalyzer) CustomAnalyzerCaster() *CustomAnalyzer { + return s +} diff --git a/typedapi/types/customcategorizetextanalyzer.go b/typedapi/types/customcategorizetextanalyzer.go index dfd46db430..d3b997921e 100644 --- a/typedapi/types/customcategorizetextanalyzer.go +++ b/typedapi/types/customcategorizetextanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CustomCategorizeTextAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L1116-L1120 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1189-L1193 type CustomCategorizeTextAnalyzer struct { CharFilter []string `json:"char_filter,omitempty"` Filter []string `json:"filter,omitempty"` @@ -86,3 +86,13 @@ func NewCustomCategorizeTextAnalyzer() *CustomCategorizeTextAnalyzer { return r } + +// true + +type CustomCategorizeTextAnalyzerVariant interface { + CustomCategorizeTextAnalyzerCaster() *CustomCategorizeTextAnalyzer +} + +func (s *CustomCategorizeTextAnalyzer) CustomCategorizeTextAnalyzerCaster() *CustomCategorizeTextAnalyzer { + return s +} diff --git a/typedapi/types/customnormalizer.go b/typedapi/types/customnormalizer.go index 821ba4c3e5..d9efbd2715 100644 --- a/typedapi/types/customnormalizer.go +++ b/typedapi/types/customnormalizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // CustomNormalizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/normalizers.ts#L30-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/normalizers.ts#L30-L34 type CustomNormalizer struct { CharFilter []string `json:"char_filter,omitempty"` Filter []string `json:"filter,omitempty"` @@ -53,3 +53,13 @@ func NewCustomNormalizer() *CustomNormalizer { return r } + +// true + +type CustomNormalizerVariant interface { + CustomNormalizerCaster() *CustomNormalizer +} + +func (s *CustomNormalizer) CustomNormalizerCaster() *CustomNormalizer { + return s +} diff --git a/typedapi/types/customscheduling.go b/typedapi/types/customscheduling.go index 1696869bea..12864f578e 100644 --- a/typedapi/types/customscheduling.go +++ b/typedapi/types/customscheduling.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CustomScheduling type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L120-L126 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L120-L126 type CustomScheduling struct { ConfigurationOverrides CustomSchedulingConfigurationOverrides `json:"configuration_overrides"` Enabled bool `json:"enabled"` @@ -114,3 +114,5 @@ func NewCustomScheduling() *CustomScheduling { return r } + +// false diff --git a/typedapi/types/customschedulingconfigurationoverrides.go b/typedapi/types/customschedulingconfigurationoverrides.go index 92bad17bac..6d06195844 100644 --- a/typedapi/types/customschedulingconfigurationoverrides.go +++ b/typedapi/types/customschedulingconfigurationoverrides.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // CustomSchedulingConfigurationOverrides type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L112-L118 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L112-L118 type CustomSchedulingConfigurationOverrides struct { DomainAllowlist []string `json:"domain_allowlist,omitempty"` MaxCrawlDepth *int `json:"max_crawl_depth,omitempty"` @@ -111,3 +111,5 @@ func NewCustomSchedulingConfigurationOverrides() *CustomSchedulingConfigurationO return r } + +// false diff --git a/typedapi/types/czechanalyzer.go b/typedapi/types/czechanalyzer.go new file mode 100644 index 0000000000..c2f31e7d98 --- /dev/null +++ b/typedapi/types/czechanalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CzechAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L112-L117 +type CzechAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *CzechAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CzechAnalyzer) MarshalJSON() ([]byte, error) { + type innerCzechAnalyzer CzechAnalyzer + tmp := innerCzechAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "czech" + + return 
json.Marshal(tmp) +} + +// NewCzechAnalyzer returns a CzechAnalyzer. +func NewCzechAnalyzer() *CzechAnalyzer { + r := &CzechAnalyzer{} + + return r +} + +// true + +type CzechAnalyzerVariant interface { + CzechAnalyzerCaster() *CzechAnalyzer +} + +func (s *CzechAnalyzer) CzechAnalyzerCaster() *CzechAnalyzer { + return s +} diff --git a/typedapi/types/dailyschedule.go b/typedapi/types/dailyschedule.go index 56133cc173..db7a1eb6fa 100644 --- a/typedapi/types/dailyschedule.go +++ b/typedapi/types/dailyschedule.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // DailySchedule type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Schedule.ts#L33-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Schedule.ts#L33-L35 type DailySchedule struct { At []ScheduleTimeOfDay `json:"at"` } @@ -33,3 +33,13 @@ func NewDailySchedule() *DailySchedule { return r } + +// true + +type DailyScheduleVariant interface { + DailyScheduleCaster() *DailySchedule +} + +func (s *DailySchedule) DailyScheduleCaster() *DailySchedule { + return s +} diff --git a/typedapi/types/danglingindex.go b/typedapi/types/danglingindex.go index 62b8d64077..565f6d3e98 100644 --- a/typedapi/types/danglingindex.go +++ b/typedapi/types/danglingindex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DanglingIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L29-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L29-L34 type DanglingIndex struct { CreationDateMillis int64 `json:"creation_date_millis"` IndexName string `json:"index_name"` @@ -110,3 +110,5 @@ func NewDanglingIndex() *DanglingIndex { return r } + +// false diff --git a/typedapi/types/danishanalyzer.go b/typedapi/types/danishanalyzer.go new file mode 100644 index 0000000000..71113312ba --- /dev/null +++ b/typedapi/types/danishanalyzer.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DanishAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L119-L123 +type DanishAnalyzer struct { + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *DanishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DanishAnalyzer) MarshalJSON() ([]byte, error) { + type innerDanishAnalyzer DanishAnalyzer + tmp := innerDanishAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "danish" + + return 
json.Marshal(tmp) +} + +// NewDanishAnalyzer returns a DanishAnalyzer. +func NewDanishAnalyzer() *DanishAnalyzer { + r := &DanishAnalyzer{} + + return r +} + +// true + +type DanishAnalyzerVariant interface { + DanishAnalyzerCaster() *DanishAnalyzer +} + +func (s *DanishAnalyzer) DanishAnalyzerCaster() *DanishAnalyzer { + return s +} diff --git a/typedapi/types/databaseconfiguration.go b/typedapi/types/databaseconfiguration.go new file mode 100644 index 0000000000..e2a1e272a2 --- /dev/null +++ b/typedapi/types/databaseconfiguration.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DatabaseConfiguration type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Database.ts#L22-L37 +type DatabaseConfiguration struct { + AdditionalDatabaseConfigurationProperty map[string]json.RawMessage `json:"-"` + Ipinfo *Ipinfo `json:"ipinfo,omitempty"` + Maxmind *Maxmind `json:"maxmind,omitempty"` + // Name The provider-assigned name of the IP geolocation database to download. + Name string `json:"name"` +} + +func (s *DatabaseConfiguration) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ipinfo": + if err := dec.Decode(&s.Ipinfo); err != nil { + return fmt.Errorf("%s | %w", "Ipinfo", err) + } + + case "maxmind": + if err := dec.Decode(&s.Maxmind); err != nil { + return fmt.Errorf("%s | %w", "Maxmind", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + default: + + if key, ok := t.(string); ok { + if s.AdditionalDatabaseConfigurationProperty == nil { + s.AdditionalDatabaseConfigurationProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalDatabaseConfigurationProperty", err) + } + s.AdditionalDatabaseConfigurationProperty[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s DatabaseConfiguration) MarshalJSON() ([]byte, error) { + type opt DatabaseConfiguration + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the 
underlying map + for key, value := range s.AdditionalDatabaseConfigurationProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDatabaseConfigurationProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewDatabaseConfiguration returns a DatabaseConfiguration. +func NewDatabaseConfiguration() *DatabaseConfiguration { + r := &DatabaseConfiguration{ + AdditionalDatabaseConfigurationProperty: make(map[string]json.RawMessage), + } + + return r +} + +// true + +type DatabaseConfigurationVariant interface { + DatabaseConfigurationCaster() *DatabaseConfiguration +} + +func (s *DatabaseConfiguration) DatabaseConfigurationCaster() *DatabaseConfiguration { + return s +} diff --git a/typedapi/types/databaseconfigurationfull.go b/typedapi/types/databaseconfigurationfull.go new file mode 100644 index 0000000000..1c28f8a45e --- /dev/null +++ b/typedapi/types/databaseconfigurationfull.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DatabaseConfigurationFull type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Database.ts#L39-L53 +type DatabaseConfigurationFull struct { + AdditionalDatabaseConfigurationFullProperty map[string]json.RawMessage `json:"-"` + Ipinfo *Ipinfo `json:"ipinfo,omitempty"` + Local *Local `json:"local,omitempty"` + Maxmind *Maxmind `json:"maxmind,omitempty"` + // Name The provider-assigned name of the IP geolocation database to download. + Name string `json:"name"` + Web *Web `json:"web,omitempty"` +} + +func (s *DatabaseConfigurationFull) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ipinfo": + if err := dec.Decode(&s.Ipinfo); err != nil { + return fmt.Errorf("%s | %w", "Ipinfo", err) + } + + case "local": + if err := dec.Decode(&s.Local); err != nil { + return fmt.Errorf("%s | %w", "Local", err) + } + + case "maxmind": + if err := dec.Decode(&s.Maxmind); err != nil { + return fmt.Errorf("%s | %w", "Maxmind", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "web": + if err := dec.Decode(&s.Web); err != nil { + return fmt.Errorf("%s | %w", "Web", err) + } + + default: + + if key, ok := t.(string); ok { + if s.AdditionalDatabaseConfigurationFullProperty == nil { + s.AdditionalDatabaseConfigurationFullProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalDatabaseConfigurationFullProperty", err) + } + 
s.AdditionalDatabaseConfigurationFullProperty[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s DatabaseConfigurationFull) MarshalJSON() ([]byte, error) { + type opt DatabaseConfigurationFull + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDatabaseConfigurationFullProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDatabaseConfigurationFullProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewDatabaseConfigurationFull returns a DatabaseConfigurationFull. +func NewDatabaseConfigurationFull() *DatabaseConfigurationFull { + r := &DatabaseConfigurationFull{ + AdditionalDatabaseConfigurationFullProperty: make(map[string]json.RawMessage), + } + + return r +} + +// false diff --git a/typedapi/types/datacounts.go b/typedapi/types/datacounts.go index a3aea15850..16d49e1bce 100644 --- a/typedapi/types/datacounts.go +++ b/typedapi/types/datacounts.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataCounts type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Job.ts#L352-L372 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Job.ts#L352-L372 type DataCounts struct { BucketCount int64 `json:"bucket_count"` EarliestRecordTimestamp *int64 `json:"earliest_record_timestamp,omitempty"` @@ -355,3 +355,5 @@ func NewDataCounts() *DataCounts { return r } + +// false diff --git a/typedapi/types/datadescription.go b/typedapi/types/datadescription.go index b6414c3a69..ae43e04ffa 100644 --- a/typedapi/types/datadescription.go +++ b/typedapi/types/datadescription.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataDescription type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Job.ts#L374-L390 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Job.ts#L374-L390 type DataDescription struct { FieldDelimiter *string `json:"field_delimiter,omitempty"` // Format Only JSON format is supported at this time. 
@@ -117,3 +117,13 @@ func NewDataDescription() *DataDescription { return r } + +// true + +type DataDescriptionVariant interface { + DataDescriptionCaster() *DataDescription +} + +func (s *DataDescription) DataDescriptionCaster() *DataDescription { + return s +} diff --git a/typedapi/types/dataemailattachment.go b/typedapi/types/dataemailattachment.go index 1e1b14cf4a..188d9fdc93 100644 --- a/typedapi/types/dataemailattachment.go +++ b/typedapi/types/dataemailattachment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // DataEmailAttachment type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L234-L236 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L234-L236 type DataEmailAttachment struct { Format *dataattachmentformat.DataAttachmentFormat `json:"format,omitempty"` } @@ -37,3 +37,13 @@ func NewDataEmailAttachment() *DataEmailAttachment { return r } + +// true + +type DataEmailAttachmentVariant interface { + DataEmailAttachmentCaster() *DataEmailAttachment +} + +func (s *DataEmailAttachment) DataEmailAttachmentCaster() *DataEmailAttachment { + return s +} diff --git a/typedapi/types/datafeedauthorization.go b/typedapi/types/datafeedauthorization.go index 4e6775fbea..e8c8e6b35b 100644 --- a/typedapi/types/datafeedauthorization.go +++ b/typedapi/types/datafeedauthorization.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DatafeedAuthorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Authorization.ts#L31-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Authorization.ts#L31-L43 type DatafeedAuthorization struct { // ApiKey If an API key was used for the most recent update to the datafeed, its name // and identifier are listed in the response. @@ -92,3 +92,5 @@ func NewDatafeedAuthorization() *DatafeedAuthorization { return r } + +// false diff --git a/typedapi/types/datafeedconfig.go b/typedapi/types/datafeedconfig.go index c00df989fa..2515aee05e 100644 --- a/typedapi/types/datafeedconfig.go +++ b/typedapi/types/datafeedconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DatafeedConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Datafeed.ts#L59-L116 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Datafeed.ts#L63-L120 type DatafeedConfig struct { // Aggregations If set, the datafeed performs aggregation searches. Support for aggregations // is limited and should be used only with low cardinality data. 
@@ -233,9 +233,19 @@ func (s *DatafeedConfig) UnmarshalJSON(data []byte) error { // NewDatafeedConfig returns a DatafeedConfig. func NewDatafeedConfig() *DatafeedConfig { r := &DatafeedConfig{ - Aggregations: make(map[string]Aggregations, 0), - ScriptFields: make(map[string]ScriptField, 0), + Aggregations: make(map[string]Aggregations), + ScriptFields: make(map[string]ScriptField), } return r } + +// true + +type DatafeedConfigVariant interface { + DatafeedConfigCaster() *DatafeedConfig +} + +func (s *DatafeedConfig) DatafeedConfigCaster() *DatafeedConfig { + return s +} diff --git a/typedapi/types/datafeedrunningstate.go b/typedapi/types/datafeedrunningstate.go index 70ce741c41..dcd45985fe 100644 --- a/typedapi/types/datafeedrunningstate.go +++ b/typedapi/types/datafeedrunningstate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DatafeedRunningState type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Datafeed.ts#L197-L211 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Datafeed.ts#L210-L224 type DatafeedRunningState struct { // RealTimeConfigured Indicates if the datafeed is "real-time"; meaning that the datafeed has no // configured `end` time. @@ -104,3 +104,5 @@ func NewDatafeedRunningState() *DatafeedRunningState { return r } + +// false diff --git a/typedapi/types/datafeeds.go b/typedapi/types/datafeeds.go index ec7b96555f..d8ff4d1b64 100644 --- a/typedapi/types/datafeeds.go +++ b/typedapi/types/datafeeds.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Datafeeds type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/info/types.ts#L40-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/info/types.ts#L42-L44 type Datafeeds struct { ScrollSize int `json:"scroll_size"` } @@ -78,3 +78,5 @@ func NewDatafeeds() *Datafeeds { return r } + +// false diff --git a/typedapi/types/datafeedsrecord.go b/typedapi/types/datafeedsrecord.go index 1c73474bca..da00e79fb9 100644 --- a/typedapi/types/datafeedsrecord.go +++ b/typedapi/types/datafeedsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // DatafeedsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/ml_datafeeds/types.ts#L22-L87 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/ml_datafeeds/types.ts#L22-L87 type DatafeedsRecord struct { // AssignmentExplanation For started datafeeds only, contains messages relating to the selection of a // node. 
@@ -233,3 +233,5 @@ func NewDatafeedsRecord() *DatafeedsRecord { return r } + +// false diff --git a/typedapi/types/datafeedstats.go b/typedapi/types/datafeedstats.go index 95bc891dc0..eb09d91494 100644 --- a/typedapi/types/datafeedstats.go +++ b/typedapi/types/datafeedstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // DatafeedStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Datafeed.ts#L139-L168 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Datafeed.ts#L143-L172 type DatafeedStats struct { // AssignmentExplanation For started datafeeds only, contains messages relating to the selection of a // node. @@ -45,7 +45,7 @@ type DatafeedStats struct { DatafeedId string `json:"datafeed_id"` // Node For started datafeeds only, this information pertains to the node upon which // the datafeed is started. - Node *DiscoveryNode `json:"node,omitempty"` + Node *DiscoveryNodeCompact `json:"node,omitempty"` // RunningState An object containing the running state for this datafeed. // It is only provided if the datafeed is started. RunningState *DatafeedRunningState `json:"running_state,omitempty"` @@ -54,7 +54,7 @@ type DatafeedStats struct { State datafeedstate.DatafeedState `json:"state"` // TimingStats An object that provides statistical information about timing aspect of this // datafeed. 
- TimingStats DatafeedTimingStats `json:"timing_stats"` + TimingStats *DatafeedTimingStats `json:"timing_stats,omitempty"` } func (s *DatafeedStats) UnmarshalJSON(data []byte) error { @@ -120,3 +120,5 @@ func NewDatafeedStats() *DatafeedStats { return r } + +// false diff --git a/typedapi/types/datafeedtimingstats.go b/typedapi/types/datafeedtimingstats.go index c0fbb60349..23ead17865 100644 --- a/typedapi/types/datafeedtimingstats.go +++ b/typedapi/types/datafeedtimingstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,12 +31,13 @@ import ( // DatafeedTimingStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Datafeed.ts#L170-L195 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Datafeed.ts#L174-L202 type DatafeedTimingStats struct { // AverageSearchTimePerBucketMs The average search time per bucket, in milliseconds. AverageSearchTimePerBucketMs Float64 `json:"average_search_time_per_bucket_ms,omitempty"` // BucketCount The number of buckets processed. - BucketCount int64 `json:"bucket_count"` + BucketCount int64 `json:"bucket_count"` + ExponentialAverageCalculationContext *ExponentialAverageCalculationContext `json:"exponential_average_calculation_context,omitempty"` // ExponentialAverageSearchTimePerHourMs The exponential average search time per hour, in milliseconds. ExponentialAverageSearchTimePerHourMs Float64 `json:"exponential_average_search_time_per_hour_ms"` // JobId Identifier for the anomaly detection job. 
@@ -82,6 +83,11 @@ func (s *DatafeedTimingStats) UnmarshalJSON(data []byte) error { s.BucketCount = f } + case "exponential_average_calculation_context": + if err := dec.Decode(&s.ExponentialAverageCalculationContext); err != nil { + return fmt.Errorf("%s | %w", "ExponentialAverageCalculationContext", err) + } + case "exponential_average_search_time_per_hour_ms": if err := dec.Decode(&s.ExponentialAverageSearchTimePerHourMs); err != nil { return fmt.Errorf("%s | %w", "ExponentialAverageSearchTimePerHourMs", err) @@ -123,3 +129,5 @@ func NewDatafeedTimingStats() *DatafeedTimingStats { return r } + +// false diff --git a/typedapi/types/dataframeanalysis.go b/typedapi/types/dataframeanalysis.go deleted file mode 100644 index 61ddb5d19a..0000000000 --- a/typedapi/types/dataframeanalysis.go +++ /dev/null @@ -1,425 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// DataframeAnalysis type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L134-L213 -type DataframeAnalysis struct { - // Alpha Advanced configuration option. Machine learning uses loss guided tree - // growing, which means that the decision trees grow where the regularized loss - // decreases most quickly. This parameter affects loss calculations by acting as - // a multiplier of the tree depth. Higher alpha values result in shallower trees - // and faster training times. By default, this value is calculated during - // hyperparameter optimization. It must be greater than or equal to zero. - Alpha *Float64 `json:"alpha,omitempty"` - // DependentVariable Defines which field of the document is to be predicted. It must match one of - // the fields in the index being used to train. If this field is missing from a - // document, then that document will not be used for training, but a prediction - // with the trained model will be generated for it. It is also known as - // continuous target variable. - // For classification analysis, the data type of the field must be numeric - // (`integer`, `short`, `long`, `byte`), categorical (`ip` or `keyword`), or - // `boolean`. There must be no more than 30 different values in this field. - // For regression analysis, the data type of the field must be numeric. - DependentVariable string `json:"dependent_variable"` - // DownsampleFactor Advanced configuration option. Controls the fraction of data that is used to - // compute the derivatives of the loss function for tree training. A small value - // results in the use of a small fraction of the data. If this value is set to - // be less than 1, accuracy typically improves. However, too small a value may - // result in poor convergence for the ensemble and so require more trees. By - // default, this value is calculated during hyperparameter optimization. 
It must - // be greater than zero and less than or equal to 1. - DownsampleFactor *Float64 `json:"downsample_factor,omitempty"` - // EarlyStoppingEnabled Advanced configuration option. Specifies whether the training process should - // finish if it is not finding any better performing models. If disabled, the - // training process can take significantly longer and the chance of finding a - // better performing model is unremarkable. - EarlyStoppingEnabled *bool `json:"early_stopping_enabled,omitempty"` - // Eta Advanced configuration option. The shrinkage applied to the weights. Smaller - // values result in larger forests which have a better generalization error. - // However, larger forests cause slower training. By default, this value is - // calculated during hyperparameter optimization. It must be a value between - // 0.001 and 1. - Eta *Float64 `json:"eta,omitempty"` - // EtaGrowthRatePerTree Advanced configuration option. Specifies the rate at which `eta` increases - // for each new tree that is added to the forest. For example, a rate of 1.05 - // increases `eta` by 5% for each extra tree. By default, this value is - // calculated during hyperparameter optimization. It must be between 0.5 and 2. - EtaGrowthRatePerTree *Float64 `json:"eta_growth_rate_per_tree,omitempty"` - // FeatureBagFraction Advanced configuration option. Defines the fraction of features that will be - // used when selecting a random bag for each candidate split. By default, this - // value is calculated during hyperparameter optimization. - FeatureBagFraction *Float64 `json:"feature_bag_fraction,omitempty"` - // FeatureProcessors Advanced configuration option. A collection of feature preprocessors that - // modify one or more included fields. The analysis uses the resulting one or - // more features instead of the original document field. However, these features - // are ephemeral; they are not stored in the destination index. 
Multiple - // `feature_processors` entries can refer to the same document fields. Automatic - // categorical feature encoding still occurs for the fields that are unprocessed - // by a custom processor or that have categorical values. Use this property only - // if you want to override the automatic feature encoding of the specified - // fields. - FeatureProcessors []DataframeAnalysisFeatureProcessor `json:"feature_processors,omitempty"` - // Gamma Advanced configuration option. Regularization parameter to prevent - // overfitting on the training data set. Multiplies a linear penalty associated - // with the size of individual trees in the forest. A high gamma value causes - // training to prefer small trees. A small gamma value results in larger - // individual trees and slower training. By default, this value is calculated - // during hyperparameter optimization. It must be a nonnegative value. - Gamma *Float64 `json:"gamma,omitempty"` - // Lambda Advanced configuration option. Regularization parameter to prevent - // overfitting on the training data set. Multiplies an L2 regularization term - // which applies to leaf weights of the individual trees in the forest. A high - // lambda value causes training to favor small leaf weights. This behavior makes - // the prediction function smoother at the expense of potentially not being able - // to capture relevant relationships between the features and the dependent - // variable. A small lambda value results in large individual trees and slower - // training. By default, this value is calculated during hyperparameter - // optimization. It must be a nonnegative value. - Lambda *Float64 `json:"lambda,omitempty"` - // MaxOptimizationRoundsPerHyperparameter Advanced configuration option. A multiplier responsible for determining the - // maximum number of hyperparameter optimization steps in the Bayesian - // optimization procedure. 
The maximum number of steps is determined based on - // the number of undefined hyperparameters times the maximum optimization rounds - // per hyperparameter. By default, this value is calculated during - // hyperparameter optimization. - MaxOptimizationRoundsPerHyperparameter *int `json:"max_optimization_rounds_per_hyperparameter,omitempty"` - // MaxTrees Advanced configuration option. Defines the maximum number of decision trees - // in the forest. The maximum value is 2000. By default, this value is - // calculated during hyperparameter optimization. - MaxTrees *int `json:"max_trees,omitempty"` - // NumTopFeatureImportanceValues Advanced configuration option. Specifies the maximum number of feature - // importance values per document to return. By default, no feature importance - // calculation occurs. - NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` - // PredictionFieldName Defines the name of the prediction field in the results. Defaults to - // `_prediction`. - PredictionFieldName *string `json:"prediction_field_name,omitempty"` - // RandomizeSeed Defines the seed for the random generator that is used to pick training data. - // By default, it is randomly generated. Set it to a specific value to use the - // same training data each time you start a job (assuming other related - // parameters such as `source` and `analyzed_fields` are the same). - RandomizeSeed *Float64 `json:"randomize_seed,omitempty"` - // SoftTreeDepthLimit Advanced configuration option. Machine learning uses loss guided tree - // growing, which means that the decision trees grow where the regularized loss - // decreases most quickly. This soft limit combines with the - // `soft_tree_depth_tolerance` to penalize trees that exceed the specified - // depth; the regularized loss increases quickly beyond this depth. By default, - // this value is calculated during hyperparameter optimization. It must be - // greater than or equal to 0. 
- SoftTreeDepthLimit *int `json:"soft_tree_depth_limit,omitempty"` - // SoftTreeDepthTolerance Advanced configuration option. This option controls how quickly the - // regularized loss increases when the tree depth exceeds - // `soft_tree_depth_limit`. By default, this value is calculated during - // hyperparameter optimization. It must be greater than or equal to 0.01. - SoftTreeDepthTolerance *Float64 `json:"soft_tree_depth_tolerance,omitempty"` - // TrainingPercent Defines what percentage of the eligible documents that will be used for - // training. Documents that are ignored by the analysis (for example those that - // contain arrays with more than one value) won’t be included in the calculation - // for used percentage. - TrainingPercent Percentage `json:"training_percent,omitempty"` -} - -func (s *DataframeAnalysis) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "alpha": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Alpha", err) - } - f := Float64(value) - s.Alpha = &f - case float64: - f := Float64(v) - s.Alpha = &f - } - - case "dependent_variable": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "DependentVariable", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.DependentVariable = o - - case "downsample_factor": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DownsampleFactor", err) - } - f := Float64(value) - s.DownsampleFactor = &f - case float64: - f := Float64(v) - s.DownsampleFactor = &f - } - - case "early_stopping_enabled": - var 
tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "EarlyStoppingEnabled", err) - } - s.EarlyStoppingEnabled = &value - case bool: - s.EarlyStoppingEnabled = &v - } - - case "eta": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Eta", err) - } - f := Float64(value) - s.Eta = &f - case float64: - f := Float64(v) - s.Eta = &f - } - - case "eta_growth_rate_per_tree": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "EtaGrowthRatePerTree", err) - } - f := Float64(value) - s.EtaGrowthRatePerTree = &f - case float64: - f := Float64(v) - s.EtaGrowthRatePerTree = &f - } - - case "feature_bag_fraction": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "FeatureBagFraction", err) - } - f := Float64(value) - s.FeatureBagFraction = &f - case float64: - f := Float64(v) - s.FeatureBagFraction = &f - } - - case "feature_processors": - if err := dec.Decode(&s.FeatureProcessors); err != nil { - return fmt.Errorf("%s | %w", "FeatureProcessors", err) - } - - case "gamma": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Gamma", err) - } - f := Float64(value) - s.Gamma = &f - case float64: - f := Float64(v) - s.Gamma = &f - } - - case "lambda": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Lambda", err) - } - f := Float64(value) - s.Lambda = &f - case float64: - f := Float64(v) - s.Lambda = &f - } - - 
case "max_optimization_rounds_per_hyperparameter": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "MaxOptimizationRoundsPerHyperparameter", err) - } - s.MaxOptimizationRoundsPerHyperparameter = &value - case float64: - f := int(v) - s.MaxOptimizationRoundsPerHyperparameter = &f - } - - case "max_trees", "maximum_number_trees": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "MaxTrees", err) - } - s.MaxTrees = &value - case float64: - f := int(v) - s.MaxTrees = &f - } - - case "num_top_feature_importance_values": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "NumTopFeatureImportanceValues", err) - } - s.NumTopFeatureImportanceValues = &value - case float64: - f := int(v) - s.NumTopFeatureImportanceValues = &f - } - - case "prediction_field_name": - if err := dec.Decode(&s.PredictionFieldName); err != nil { - return fmt.Errorf("%s | %w", "PredictionFieldName", err) - } - - case "randomize_seed": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "RandomizeSeed", err) - } - f := Float64(value) - s.RandomizeSeed = &f - case float64: - f := Float64(v) - s.RandomizeSeed = &f - } - - case "soft_tree_depth_limit": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "SoftTreeDepthLimit", err) - } - s.SoftTreeDepthLimit = &value - case float64: - f := int(v) - s.SoftTreeDepthLimit = &f - } - - case "soft_tree_depth_tolerance": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := 
strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "SoftTreeDepthTolerance", err) - } - f := Float64(value) - s.SoftTreeDepthTolerance = &f - case float64: - f := Float64(v) - s.SoftTreeDepthTolerance = &f - } - - case "training_percent": - if err := dec.Decode(&s.TrainingPercent); err != nil { - return fmt.Errorf("%s | %w", "TrainingPercent", err) - } - - } - } - return nil -} - -// NewDataframeAnalysis returns a DataframeAnalysis. -func NewDataframeAnalysis() *DataframeAnalysis { - r := &DataframeAnalysis{} - - return r -} diff --git a/typedapi/types/dataframeanalysisanalyzedfields.go b/typedapi/types/dataframeanalysisanalyzedfields.go index 73506cddab..3359686ec4 100644 --- a/typedapi/types/dataframeanalysisanalyzedfields.go +++ b/typedapi/types/dataframeanalysisanalyzedfields.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DataframeAnalysisAnalyzedFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L238-L244 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L238-L244 type DataframeAnalysisAnalyzedFields struct { // Excludes An array of strings that defines the fields that will be included in the // analysis. 
@@ -87,3 +87,13 @@ func NewDataframeAnalysisAnalyzedFields() *DataframeAnalysisAnalyzedFields { return r } + +// true + +type DataframeAnalysisAnalyzedFieldsVariant interface { + DataframeAnalysisAnalyzedFieldsCaster() *DataframeAnalysisAnalyzedFields +} + +func (s *DataframeAnalysisAnalyzedFields) DataframeAnalysisAnalyzedFieldsCaster() *DataframeAnalysisAnalyzedFields { + return s +} diff --git a/typedapi/types/dataframeanalysisclassification.go b/typedapi/types/dataframeanalysisclassification.go index a851ac93f5..1606fb5231 100644 --- a/typedapi/types/dataframeanalysisclassification.go +++ b/typedapi/types/dataframeanalysisclassification.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalysisClassification type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L227-L236 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L227-L236 type DataframeAnalysisClassification struct { // Alpha Advanced configuration option. 
Machine learning uses loss guided tree // growing, which means that the decision trees grow where the regularized loss @@ -460,3 +460,13 @@ func NewDataframeAnalysisClassification() *DataframeAnalysisClassification { return r } + +// true + +type DataframeAnalysisClassificationVariant interface { + DataframeAnalysisClassificationCaster() *DataframeAnalysisClassification +} + +func (s *DataframeAnalysisClassification) DataframeAnalysisClassificationCaster() *DataframeAnalysisClassification { + return s +} diff --git a/typedapi/types/dataframeanalysiscontainer.go b/typedapi/types/dataframeanalysiscontainer.go index c4a213217e..5894986801 100644 --- a/typedapi/types/dataframeanalysiscontainer.go +++ b/typedapi/types/dataframeanalysiscontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // DataframeAnalysisContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L84-L101 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L84-L101 type DataframeAnalysisContainer struct { + AdditionalDataframeAnalysisContainerProperty map[string]json.RawMessage `json:"-"` // Classification The configuration information necessary to perform classification. Classification *DataframeAnalysisClassification `json:"classification,omitempty"` // OutlierDetection The configuration information necessary to perform outlier detection. 
NOTE: @@ -40,9 +46,50 @@ type DataframeAnalysisContainer struct { Regression *DataframeAnalysisRegression `json:"regression,omitempty"` } +// MarshalJSON overrides marshalling for types with additional properties +func (s DataframeAnalysisContainer) MarshalJSON() ([]byte, error) { + type opt DataframeAnalysisContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDataframeAnalysisContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDataframeAnalysisContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewDataframeAnalysisContainer returns a DataframeAnalysisContainer. func NewDataframeAnalysisContainer() *DataframeAnalysisContainer { - r := &DataframeAnalysisContainer{} + r := &DataframeAnalysisContainer{ + AdditionalDataframeAnalysisContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type DataframeAnalysisContainerVariant interface { + DataframeAnalysisContainerCaster() *DataframeAnalysisContainer +} + +func (s *DataframeAnalysisContainer) DataframeAnalysisContainerCaster() *DataframeAnalysisContainer { + return s +} diff --git a/typedapi/types/dataframeanalysisfeatureprocessor.go b/typedapi/types/dataframeanalysisfeatureprocessor.go index 502064fa1f..01ff2c222f 100644 --- a/typedapi/types/dataframeanalysisfeatureprocessor.go +++ b/typedapi/types/dataframeanalysisfeatureprocessor.go @@ -16,14 +16,20 @@ // under the License.
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // DataframeAnalysisFeatureProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L246-L258 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L246-L258 type DataframeAnalysisFeatureProcessor struct { + AdditionalDataframeAnalysisFeatureProcessorProperty map[string]json.RawMessage `json:"-"` // FrequencyEncoding The configuration information necessary to perform frequency encoding. FrequencyEncoding *DataframeAnalysisFeatureProcessorFrequencyEncoding `json:"frequency_encoding,omitempty"` // MultiEncoding The configuration information necessary to perform multi encoding. 
It allows @@ -41,9 +47,50 @@ type DataframeAnalysisFeatureProcessor struct { TargetMeanEncoding *DataframeAnalysisFeatureProcessorTargetMeanEncoding `json:"target_mean_encoding,omitempty"` } +// MarshalJSON overrides marshalling for types with additional properties +func (s DataframeAnalysisFeatureProcessor) MarshalJSON() ([]byte, error) { + type opt DataframeAnalysisFeatureProcessor + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDataframeAnalysisFeatureProcessorProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDataframeAnalysisFeatureProcessorProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewDataframeAnalysisFeatureProcessor returns a DataframeAnalysisFeatureProcessor.
func NewDataframeAnalysisFeatureProcessor() *DataframeAnalysisFeatureProcessor { - r := &DataframeAnalysisFeatureProcessor{} + r := &DataframeAnalysisFeatureProcessor{ + AdditionalDataframeAnalysisFeatureProcessorProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type DataframeAnalysisFeatureProcessorVariant interface { + DataframeAnalysisFeatureProcessorCaster() *DataframeAnalysisFeatureProcessor +} + +func (s *DataframeAnalysisFeatureProcessor) DataframeAnalysisFeatureProcessorCaster() *DataframeAnalysisFeatureProcessor { + return s +} diff --git a/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go b/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go index ab19f37459..e50bd7c1e2 100644 --- a/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go +++ b/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DataframeAnalysisFeatureProcessorFrequencyEncoding type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L260-L267 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L260-L267 type DataframeAnalysisFeatureProcessorFrequencyEncoding struct { // FeatureName The resulting feature name. 
FeatureName string `json:"feature_name"` @@ -81,8 +81,18 @@ func (s *DataframeAnalysisFeatureProcessorFrequencyEncoding) UnmarshalJSON(data // NewDataframeAnalysisFeatureProcessorFrequencyEncoding returns a DataframeAnalysisFeatureProcessorFrequencyEncoding. func NewDataframeAnalysisFeatureProcessorFrequencyEncoding() *DataframeAnalysisFeatureProcessorFrequencyEncoding { r := &DataframeAnalysisFeatureProcessorFrequencyEncoding{ - FrequencyMap: make(map[string]Float64, 0), + FrequencyMap: make(map[string]Float64), } return r } + +// true + +type DataframeAnalysisFeatureProcessorFrequencyEncodingVariant interface { + DataframeAnalysisFeatureProcessorFrequencyEncodingCaster() *DataframeAnalysisFeatureProcessorFrequencyEncoding +} + +func (s *DataframeAnalysisFeatureProcessorFrequencyEncoding) DataframeAnalysisFeatureProcessorFrequencyEncodingCaster() *DataframeAnalysisFeatureProcessorFrequencyEncoding { + return s +} diff --git a/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go b/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go index ffe323895c..6fdd2bca29 100644 --- a/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go +++ b/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // DataframeAnalysisFeatureProcessorMultiEncoding type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L269-L272 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L269-L272 type DataframeAnalysisFeatureProcessorMultiEncoding struct { // Processors The ordered array of custom processors to execute. Must be more than 1. Processors []int `json:"processors"` @@ -34,3 +34,13 @@ func NewDataframeAnalysisFeatureProcessorMultiEncoding() *DataframeAnalysisFeatu return r } + +// true + +type DataframeAnalysisFeatureProcessorMultiEncodingVariant interface { + DataframeAnalysisFeatureProcessorMultiEncodingCaster() *DataframeAnalysisFeatureProcessorMultiEncoding +} + +func (s *DataframeAnalysisFeatureProcessorMultiEncoding) DataframeAnalysisFeatureProcessorMultiEncodingCaster() *DataframeAnalysisFeatureProcessorMultiEncoding { + return s +} diff --git a/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go b/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go index c616d7d7e2..b34e6c205f 100644 --- a/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go +++ b/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalysisFeatureProcessorNGramEncoding type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L274-L286 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L274-L286 type DataframeAnalysisFeatureProcessorNGramEncoding struct { Custom *bool `json:"custom,omitempty"` // FeaturePrefix The feature name prefix. Defaults to ngram__. @@ -143,3 +143,13 @@ func NewDataframeAnalysisFeatureProcessorNGramEncoding() *DataframeAnalysisFeatu return r } + +// true + +type DataframeAnalysisFeatureProcessorNGramEncodingVariant interface { + DataframeAnalysisFeatureProcessorNGramEncodingCaster() *DataframeAnalysisFeatureProcessorNGramEncoding +} + +func (s *DataframeAnalysisFeatureProcessorNGramEncoding) DataframeAnalysisFeatureProcessorNGramEncodingCaster() *DataframeAnalysisFeatureProcessorNGramEncoding { + return s +} diff --git a/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go b/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go index 3865b22af6..342cfb61e3 100644 --- a/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go +++ b/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalysisFeatureProcessorOneHotEncoding type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L288-L293 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L288-L293 type DataframeAnalysisFeatureProcessorOneHotEncoding struct { // Field The name of the field to encode. Field string `json:"field"` @@ -82,3 +82,13 @@ func NewDataframeAnalysisFeatureProcessorOneHotEncoding() *DataframeAnalysisFeat return r } + +// true + +type DataframeAnalysisFeatureProcessorOneHotEncodingVariant interface { + DataframeAnalysisFeatureProcessorOneHotEncodingCaster() *DataframeAnalysisFeatureProcessorOneHotEncoding +} + +func (s *DataframeAnalysisFeatureProcessorOneHotEncoding) DataframeAnalysisFeatureProcessorOneHotEncodingCaster() *DataframeAnalysisFeatureProcessorOneHotEncoding { + return s +} diff --git a/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go b/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go index 2798a3f71b..5067097e48 100644 --- a/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go +++ b/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalysisFeatureProcessorTargetMeanEncoding type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L295-L304 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L295-L304 type DataframeAnalysisFeatureProcessorTargetMeanEncoding struct { // DefaultValue The default value if field value is not found in the target_map. DefaultValue int `json:"default_value"` @@ -100,8 +100,18 @@ func (s *DataframeAnalysisFeatureProcessorTargetMeanEncoding) UnmarshalJSON(data // NewDataframeAnalysisFeatureProcessorTargetMeanEncoding returns a DataframeAnalysisFeatureProcessorTargetMeanEncoding. func NewDataframeAnalysisFeatureProcessorTargetMeanEncoding() *DataframeAnalysisFeatureProcessorTargetMeanEncoding { r := &DataframeAnalysisFeatureProcessorTargetMeanEncoding{ - TargetMap: make(map[string]json.RawMessage, 0), + TargetMap: make(map[string]json.RawMessage), } return r } + +// true + +type DataframeAnalysisFeatureProcessorTargetMeanEncodingVariant interface { + DataframeAnalysisFeatureProcessorTargetMeanEncodingCaster() *DataframeAnalysisFeatureProcessorTargetMeanEncoding +} + +func (s *DataframeAnalysisFeatureProcessorTargetMeanEncoding) DataframeAnalysisFeatureProcessorTargetMeanEncodingCaster() *DataframeAnalysisFeatureProcessorTargetMeanEncoding { + return s +} diff --git a/typedapi/types/dataframeanalysisoutlierdetection.go b/typedapi/types/dataframeanalysisoutlierdetection.go index aa4325cd8b..9c12b765b9 100644 --- a/typedapi/types/dataframeanalysisoutlierdetection.go +++ b/typedapi/types/dataframeanalysisoutlierdetection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalysisOutlierDetection type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L103-L132 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L103-L132 type DataframeAnalysisOutlierDetection struct { // ComputeFeatureInfluence Specifies whether the feature influence calculation is enabled. ComputeFeatureInfluence *bool `json:"compute_feature_influence,omitempty"` @@ -173,3 +173,13 @@ func NewDataframeAnalysisOutlierDetection() *DataframeAnalysisOutlierDetection { return r } + +// true + +type DataframeAnalysisOutlierDetectionVariant interface { + DataframeAnalysisOutlierDetectionCaster() *DataframeAnalysisOutlierDetection +} + +func (s *DataframeAnalysisOutlierDetection) DataframeAnalysisOutlierDetectionCaster() *DataframeAnalysisOutlierDetection { + return s +} diff --git a/typedapi/types/dataframeanalysisregression.go b/typedapi/types/dataframeanalysisregression.go index 7b7cc9204a..88b58290ea 100644 --- a/typedapi/types/dataframeanalysisregression.go +++ b/typedapi/types/dataframeanalysisregression.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalysisRegression type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L215-L225 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L215-L225 type DataframeAnalysisRegression struct { // Alpha Advanced configuration option. Machine learning uses loss guided tree // growing, which means that the decision trees grow where the regularized loss @@ -457,3 +457,13 @@ func NewDataframeAnalysisRegression() *DataframeAnalysisRegression { return r } + +// true + +type DataframeAnalysisRegressionVariant interface { + DataframeAnalysisRegressionCaster() *DataframeAnalysisRegression +} + +func (s *DataframeAnalysisRegression) DataframeAnalysisRegressionCaster() *DataframeAnalysisRegression { + return s +} diff --git a/typedapi/types/dataframeanalytics.go b/typedapi/types/dataframeanalytics.go index 7fe9a18da8..e534d49593 100644 --- a/typedapi/types/dataframeanalytics.go +++ b/typedapi/types/dataframeanalytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // DataframeAnalytics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L324-L344 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L325-L345 type DataframeAnalytics struct { // AnalysisStats An object containing information about the analysis job. 
AnalysisStats *DataframeAnalyticsStatsContainer `json:"analysis_stats,omitempty"` @@ -131,3 +131,5 @@ func NewDataframeAnalytics() *DataframeAnalytics { return r } + +// false diff --git a/typedapi/types/dataframeanalyticsauthorization.go b/typedapi/types/dataframeanalyticsauthorization.go index 087221320d..059419ac45 100644 --- a/typedapi/types/dataframeanalyticsauthorization.go +++ b/typedapi/types/dataframeanalyticsauthorization.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsAuthorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Authorization.ts#L45-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Authorization.ts#L45-L57 type DataframeAnalyticsAuthorization struct { // ApiKey If an API key was used for the most recent update to the job, its name and // identifier are listed in the response. @@ -92,3 +92,5 @@ func NewDataframeAnalyticsAuthorization() *DataframeAnalyticsAuthorization { return r } + +// false diff --git a/typedapi/types/dataframeanalyticsdestination.go b/typedapi/types/dataframeanalyticsdestination.go index 3bd050e6d7..2189fde23b 100644 --- a/typedapi/types/dataframeanalyticsdestination.go +++ b/typedapi/types/dataframeanalyticsdestination.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DataframeAnalyticsDestination type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L77-L82 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L77-L82 type DataframeAnalyticsDestination struct { // Index Defines the destination index to store the results of the data frame // analytics job. @@ -76,3 +76,13 @@ func NewDataframeAnalyticsDestination() *DataframeAnalyticsDestination { return r } + +// true + +type DataframeAnalyticsDestinationVariant interface { + DataframeAnalyticsDestinationCaster() *DataframeAnalyticsDestination +} + +func (s *DataframeAnalyticsDestination) DataframeAnalyticsDestinationCaster() *DataframeAnalyticsDestination { + return s +} diff --git a/typedapi/types/dataframeanalyticsfieldselection.go b/typedapi/types/dataframeanalyticsfieldselection.go index c614e38977..8507b2f44b 100644 --- a/typedapi/types/dataframeanalyticsfieldselection.go +++ b/typedapi/types/dataframeanalyticsfieldselection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsFieldSelection type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L55-L68 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L55-L68 type DataframeAnalyticsFieldSelection struct { // FeatureType The feature type of this field for the analysis. May be categorical or // numerical. @@ -136,3 +136,5 @@ func NewDataframeAnalyticsFieldSelection() *DataframeAnalyticsFieldSelection { return r } + +// false diff --git a/typedapi/types/dataframeanalyticsmemoryestimation.go b/typedapi/types/dataframeanalyticsmemoryestimation.go index 217a801a84..9537ec757e 100644 --- a/typedapi/types/dataframeanalyticsmemoryestimation.go +++ b/typedapi/types/dataframeanalyticsmemoryestimation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsMemoryEstimation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L70-L75 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L70-L75 type DataframeAnalyticsMemoryEstimation struct { // ExpectedMemoryWithDisk Estimated memory usage under the assumption that overflowing to disk is // allowed during data frame analytics. 
expected_memory_with_disk is usually @@ -93,3 +93,5 @@ func NewDataframeAnalyticsMemoryEstimation() *DataframeAnalyticsMemoryEstimation return r } + +// false diff --git a/typedapi/types/dataframeanalyticsrecord.go b/typedapi/types/dataframeanalyticsrecord.go index 27c5942ef5..92530d57b5 100644 --- a/typedapi/types/dataframeanalyticsrecord.go +++ b/typedapi/types/dataframeanalyticsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataFrameAnalyticsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/ml_data_frame_analytics/types.ts#L22-L102 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/ml_data_frame_analytics/types.ts#L22-L102 type DataFrameAnalyticsRecord struct { // AssignmentExplanation Messages related to the selection of a node. AssignmentExplanation *string `json:"assignment_explanation,omitempty"` @@ -237,3 +237,5 @@ func NewDataFrameAnalyticsRecord() *DataFrameAnalyticsRecord { return r } + +// false diff --git a/typedapi/types/dataframeanalyticssource.go b/typedapi/types/dataframeanalyticssource.go index 70f3db3a5d..d981a6e001 100644 --- a/typedapi/types/dataframeanalyticssource.go +++ b/typedapi/types/dataframeanalyticssource.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DataframeAnalyticsSource type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L39-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L39-L53 type DataframeAnalyticsSource struct { // Index Index or indices on which to perform the analysis. It can be a single index // or index pattern as well as an array of indices or patterns. NOTE: If your @@ -109,3 +109,13 @@ func NewDataframeAnalyticsSource() *DataframeAnalyticsSource { return r } + +// true + +type DataframeAnalyticsSourceVariant interface { + DataframeAnalyticsSourceCaster() *DataframeAnalyticsSource +} + +func (s *DataframeAnalyticsSource) DataframeAnalyticsSourceCaster() *DataframeAnalyticsSource { + return s +} diff --git a/typedapi/types/dataframeanalyticsstatscontainer.go b/typedapi/types/dataframeanalyticsstatscontainer.go index 7db26960b4..e28f6f7e3d 100644 --- a/typedapi/types/dataframeanalyticsstatscontainer.go +++ b/typedapi/types/dataframeanalyticsstatscontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // DataframeAnalyticsStatsContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L373-L381 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L374-L382 type DataframeAnalyticsStatsContainer struct { + AdditionalDataframeAnalyticsStatsContainerProperty map[string]json.RawMessage `json:"-"` // ClassificationStats An object containing information about the classification analysis job. ClassificationStats *DataframeAnalyticsStatsHyperparameters `json:"classification_stats,omitempty"` // OutlierDetectionStats An object containing information about the outlier detection job. @@ -32,9 +38,42 @@ type DataframeAnalyticsStatsContainer struct { RegressionStats *DataframeAnalyticsStatsHyperparameters `json:"regression_stats,omitempty"` } +// MarshalJSON overrides marshalling for types with additional properties +func (s DataframeAnalyticsStatsContainer) MarshalJSON() ([]byte, error) { + type opt DataframeAnalyticsStatsContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDataframeAnalyticsStatsContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDataframeAnalyticsStatsContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewDataframeAnalyticsStatsContainer returns a DataframeAnalyticsStatsContainer.
func NewDataframeAnalyticsStatsContainer() *DataframeAnalyticsStatsContainer { - r := &DataframeAnalyticsStatsContainer{} + r := &DataframeAnalyticsStatsContainer{ + AdditionalDataframeAnalyticsStatsContainerProperty: make(map[string]json.RawMessage), + } return r } + +// false diff --git a/typedapi/types/dataframeanalyticsstatsdatacounts.go b/typedapi/types/dataframeanalyticsstatsdatacounts.go index 9d5976aae2..5a4f1d460b 100644 --- a/typedapi/types/dataframeanalyticsstatsdatacounts.go +++ b/typedapi/types/dataframeanalyticsstatsdatacounts.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsStatsDataCounts type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L364-L371 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L365-L372 type DataframeAnalyticsStatsDataCounts struct { // SkippedDocsCount The number of documents that are skipped during the analysis because they // contained values that are not supported by the analysis. For example, outlier @@ -120,3 +120,5 @@ func NewDataframeAnalyticsStatsDataCounts() *DataframeAnalyticsStatsDataCounts { return r } + +// false diff --git a/typedapi/types/dataframeanalyticsstatshyperparameters.go b/typedapi/types/dataframeanalyticsstatshyperparameters.go index dd5fee2942..e3856651df 100644 --- a/typedapi/types/dataframeanalyticsstatshyperparameters.go +++ b/typedapi/types/dataframeanalyticsstatshyperparameters.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsStatsHyperparameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L383-L402 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L384-L403 type DataframeAnalyticsStatsHyperparameters struct { // Hyperparameters An object containing the parameters of the classification analysis job. Hyperparameters Hyperparameters `json:"hyperparameters"` @@ -108,3 +108,5 @@ func NewDataframeAnalyticsStatsHyperparameters() *DataframeAnalyticsStatsHyperpa return r } + +// false diff --git a/typedapi/types/dataframeanalyticsstatsmemoryusage.go b/typedapi/types/dataframeanalyticsstatsmemoryusage.go index 44963d936d..9ca6767eb8 100644 --- a/typedapi/types/dataframeanalyticsstatsmemoryusage.go +++ b/typedapi/types/dataframeanalyticsstatsmemoryusage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsStatsMemoryUsage type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L353-L362 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L354-L363 type DataframeAnalyticsStatsMemoryUsage struct { // MemoryReestimateBytes This value is present when the status is hard_limit and it is a new estimate // of how much memory the job needs. @@ -117,3 +117,5 @@ func NewDataframeAnalyticsStatsMemoryUsage() *DataframeAnalyticsStatsMemoryUsage return r } + +// false diff --git a/typedapi/types/dataframeanalyticsstatsoutlierdetection.go b/typedapi/types/dataframeanalyticsstatsoutlierdetection.go index 87d7bc9876..4e4f704da0 100644 --- a/typedapi/types/dataframeanalyticsstatsoutlierdetection.go +++ b/typedapi/types/dataframeanalyticsstatsoutlierdetection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DataframeAnalyticsStatsOutlierDetection type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L404-L417 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L405-L418 type DataframeAnalyticsStatsOutlierDetection struct { // Parameters The list of job parameters specified by the user or determined by algorithmic // heuristics. 
@@ -83,3 +83,5 @@ func NewDataframeAnalyticsStatsOutlierDetection() *DataframeAnalyticsStatsOutlie return r } + +// false diff --git a/typedapi/types/dataframeanalyticsstatsprogress.go b/typedapi/types/dataframeanalyticsstatsprogress.go index 33ea5646ee..919c559da4 100644 --- a/typedapi/types/dataframeanalyticsstatsprogress.go +++ b/typedapi/types/dataframeanalyticsstatsprogress.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsStatsProgress type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L346-L351 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L347-L352 type DataframeAnalyticsStatsProgress struct { // Phase Defines the phase of the data frame analytics job. Phase string `json:"phase"` @@ -94,3 +94,5 @@ func NewDataframeAnalyticsStatsProgress() *DataframeAnalyticsStatsProgress { return r } + +// false diff --git a/typedapi/types/dataframeanalyticssummary.go b/typedapi/types/dataframeanalyticssummary.go index a41ce49beb..c54703d497 100644 --- a/typedapi/types/dataframeanalyticssummary.go +++ b/typedapi/types/dataframeanalyticssummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeAnalyticsSummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L306-L322 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L306-L323 type DataframeAnalyticsSummary struct { AllowLazyStart *bool `json:"allow_lazy_start,omitempty"` Analysis DataframeAnalysisContainer `json:"analysis"` @@ -45,6 +45,7 @@ type DataframeAnalyticsSummary struct { Dest DataframeAnalyticsDestination `json:"dest"` Id string `json:"id"` MaxNumThreads *int `json:"max_num_threads,omitempty"` + Meta_ Metadata `json:"_meta,omitempty"` ModelMemoryLimit *string `json:"model_memory_limit,omitempty"` Source DataframeAnalyticsSource `json:"source"` Version *string `json:"version,omitempty"` @@ -137,6 +138,11 @@ func (s *DataframeAnalyticsSummary) UnmarshalJSON(data []byte) error { s.MaxNumThreads = &f } + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + case "model_memory_limit": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -170,3 +176,5 @@ func NewDataframeAnalyticsSummary() *DataframeAnalyticsSummary { return r } + +// false diff --git a/typedapi/types/dataframeclassificationsummary.go b/typedapi/types/dataframeclassificationsummary.go index ce34f84d76..06534c219a 100644 --- a/typedapi/types/dataframeclassificationsummary.go +++ b/typedapi/types/dataframeclassificationsummary.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // DataframeClassificationSummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L44-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L44-L66 type DataframeClassificationSummary struct { // Accuracy Accuracy of predictions (per-class and overall). Accuracy *DataframeClassificationSummaryAccuracy `json:"accuracy,omitempty"` @@ -45,3 +45,5 @@ func NewDataframeClassificationSummary() *DataframeClassificationSummary { return r } + +// false diff --git a/typedapi/types/dataframeclassificationsummaryaccuracy.go b/typedapi/types/dataframeclassificationsummaryaccuracy.go index 359c6226f8..149ce4f9f0 100644 --- a/typedapi/types/dataframeclassificationsummaryaccuracy.go +++ b/typedapi/types/dataframeclassificationsummaryaccuracy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeClassificationSummaryAccuracy type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L111-L114 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L111-L114 type DataframeClassificationSummaryAccuracy struct { Classes []DataframeEvaluationClass `json:"classes"` OverallAccuracy Float64 `json:"overall_accuracy"` @@ -84,3 +84,5 @@ func NewDataframeClassificationSummaryAccuracy() *DataframeClassificationSummary return r } + +// false diff --git a/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go b/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go index 6e4290b9c9..ab5c6f71fe 100644 --- a/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go +++ b/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeClassificationSummaryMulticlassConfusionMatrix type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L120-L123 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L120-L123 type DataframeClassificationSummaryMulticlassConfusionMatrix struct { ConfusionMatrix []ConfusionMatrixItem `json:"confusion_matrix"` OtherActualClassCount int `json:"other_actual_class_count"` @@ -84,3 +84,5 @@ func NewDataframeClassificationSummaryMulticlassConfusionMatrix() *DataframeClas return r } + +// false diff --git a/typedapi/types/dataframeclassificationsummaryprecision.go b/typedapi/types/dataframeclassificationsummaryprecision.go index eb2dfbfded..5b5f37fcd4 100644 --- a/typedapi/types/dataframeclassificationsummaryprecision.go +++ b/typedapi/types/dataframeclassificationsummaryprecision.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeClassificationSummaryPrecision type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L101-L104 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L101-L104 type DataframeClassificationSummaryPrecision struct { AvgPrecision Float64 `json:"avg_precision"` Classes []DataframeEvaluationClass `json:"classes"` @@ -84,3 +84,5 @@ func NewDataframeClassificationSummaryPrecision() *DataframeClassificationSummar return r } + +// false diff --git a/typedapi/types/dataframeclassificationsummaryrecall.go b/typedapi/types/dataframeclassificationsummaryrecall.go index 810de51cdc..dcf557a25e 100644 --- a/typedapi/types/dataframeclassificationsummaryrecall.go +++ b/typedapi/types/dataframeclassificationsummaryrecall.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeClassificationSummaryRecall type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L106-L109 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L106-L109 type DataframeClassificationSummaryRecall struct { AvgRecall Float64 `json:"avg_recall"` Classes []DataframeEvaluationClass `json:"classes"` @@ -84,3 +84,5 @@ func NewDataframeClassificationSummaryRecall() *DataframeClassificationSummaryRe return r } + +// false diff --git a/typedapi/types/dataframeevaluationclass.go b/typedapi/types/dataframeevaluationclass.go index 66432c69d7..f83c01bb63 100644 --- a/typedapi/types/dataframeevaluationclass.go +++ b/typedapi/types/dataframeevaluationclass.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationClass type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L116-L118 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L116-L118 type DataframeEvaluationClass struct { ClassName string `json:"class_name"` Value Float64 `json:"value"` @@ -84,3 +84,5 @@ func NewDataframeEvaluationClass() *DataframeEvaluationClass { return r } + +// false diff --git a/typedapi/types/dataframeevaluationclassification.go b/typedapi/types/dataframeevaluationclassification.go index 7e5ac80ed5..af3f9332cd 100644 --- a/typedapi/types/dataframeevaluationclassification.go +++ b/typedapi/types/dataframeevaluationclassification.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DataframeEvaluationClassification type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeEvaluation.ts#L35-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeEvaluation.ts#L35-L44 type DataframeEvaluationClassification struct { // ActualField The field of the index which contains the ground truth. The data type of this // field can be boolean or integer. 
If the data type is integer, the value has @@ -93,3 +93,13 @@ func NewDataframeEvaluationClassification() *DataframeEvaluationClassification { return r } + +// true + +type DataframeEvaluationClassificationVariant interface { + DataframeEvaluationClassificationCaster() *DataframeEvaluationClassification +} + +func (s *DataframeEvaluationClassification) DataframeEvaluationClassificationCaster() *DataframeEvaluationClassification { + return s +} diff --git a/typedapi/types/dataframeevaluationclassificationmetrics.go b/typedapi/types/dataframeevaluationclassificationmetrics.go index b1d97078d7..c66fb4497b 100644 --- a/typedapi/types/dataframeevaluationclassificationmetrics.go +++ b/typedapi/types/dataframeevaluationclassificationmetrics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // DataframeEvaluationClassificationMetrics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeEvaluation.ts#L73-L78 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeEvaluation.ts#L73-L78 type DataframeEvaluationClassificationMetrics struct { // Accuracy Accuracy of predictions (per-class and overall). Accuracy map[string]json.RawMessage `json:"accuracy,omitempty"` @@ -45,11 +45,21 @@ type DataframeEvaluationClassificationMetrics struct { // NewDataframeEvaluationClassificationMetrics returns a DataframeEvaluationClassificationMetrics. 
func NewDataframeEvaluationClassificationMetrics() *DataframeEvaluationClassificationMetrics { r := &DataframeEvaluationClassificationMetrics{ - Accuracy: make(map[string]json.RawMessage, 0), - MulticlassConfusionMatrix: make(map[string]json.RawMessage, 0), - Precision: make(map[string]json.RawMessage, 0), - Recall: make(map[string]json.RawMessage, 0), + Accuracy: make(map[string]json.RawMessage), + MulticlassConfusionMatrix: make(map[string]json.RawMessage), + Precision: make(map[string]json.RawMessage), + Recall: make(map[string]json.RawMessage), } return r } + +// true + +type DataframeEvaluationClassificationMetricsVariant interface { + DataframeEvaluationClassificationMetricsCaster() *DataframeEvaluationClassificationMetrics +} + +func (s *DataframeEvaluationClassificationMetrics) DataframeEvaluationClassificationMetricsCaster() *DataframeEvaluationClassificationMetrics { + return s +} diff --git a/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go b/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go index 953a6fefa1..1816ffe360 100644 --- a/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go +++ b/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationClassificationMetricsAucRoc type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeEvaluation.ts#L85-L90 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeEvaluation.ts#L85-L90 type DataframeEvaluationClassificationMetricsAucRoc struct { // ClassName Name of the only class that is treated as positive during AUC ROC // calculation. Other classes are treated as negative ("one-vs-all" strategy). @@ -88,3 +88,13 @@ func NewDataframeEvaluationClassificationMetricsAucRoc() *DataframeEvaluationCla return r } + +// true + +type DataframeEvaluationClassificationMetricsAucRocVariant interface { + DataframeEvaluationClassificationMetricsAucRocCaster() *DataframeEvaluationClassificationMetricsAucRoc +} + +func (s *DataframeEvaluationClassificationMetricsAucRoc) DataframeEvaluationClassificationMetricsAucRocCaster() *DataframeEvaluationClassificationMetricsAucRoc { + return s +} diff --git a/typedapi/types/dataframeevaluationcontainer.go b/typedapi/types/dataframeevaluationcontainer.go index b8e28144c9..bd5300e45a 100644 --- a/typedapi/types/dataframeevaluationcontainer.go +++ b/typedapi/types/dataframeevaluationcontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // DataframeEvaluationContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeEvaluation.ts#L25-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeEvaluation.ts#L25-L33 type DataframeEvaluationContainer struct { + AdditionalDataframeEvaluationContainerProperty map[string]json.RawMessage `json:"-"` // Classification Classification evaluation evaluates the results of a classification analysis // which outputs a prediction that identifies to which of the classes each // document belongs. @@ -36,9 +42,50 @@ type DataframeEvaluationContainer struct { Regression *DataframeEvaluationRegression `json:"regression,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s DataframeEvaluationContainer) MarshalJSON() ([]byte, error) { + type opt DataframeEvaluationContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDataframeEvaluationContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDataframeEvaluationContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewDataframeEvaluationContainer returns a DataframeEvaluationContainer. 
func NewDataframeEvaluationContainer() *DataframeEvaluationContainer { - r := &DataframeEvaluationContainer{} + r := &DataframeEvaluationContainer{ + AdditionalDataframeEvaluationContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type DataframeEvaluationContainerVariant interface { + DataframeEvaluationContainerCaster() *DataframeEvaluationContainer +} + +func (s *DataframeEvaluationContainer) DataframeEvaluationContainerCaster() *DataframeEvaluationContainer { + return s +} diff --git a/typedapi/types/dataframeevaluationmetrics.go b/typedapi/types/dataframeevaluationmetrics.go deleted file mode 100644 index 28f0e0910a..0000000000 --- a/typedapi/types/dataframeevaluationmetrics.go +++ /dev/null @@ -1,49 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "encoding/json" -) - -// DataframeEvaluationMetrics type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeEvaluation.ts#L64-L71 -type DataframeEvaluationMetrics struct { - // AucRoc The AUC ROC (area under the curve of the receiver operating characteristic) - // score and optionally the curve. It is calculated for a specific class - // (provided as "class_name") treated as positive. - AucRoc *DataframeEvaluationClassificationMetricsAucRoc `json:"auc_roc,omitempty"` - // Precision Precision of predictions (per-class and average). - Precision map[string]json.RawMessage `json:"precision,omitempty"` - // Recall Recall of predictions (per-class and average). - Recall map[string]json.RawMessage `json:"recall,omitempty"` -} - -// NewDataframeEvaluationMetrics returns a DataframeEvaluationMetrics. -func NewDataframeEvaluationMetrics() *DataframeEvaluationMetrics { - r := &DataframeEvaluationMetrics{ - Precision: make(map[string]json.RawMessage, 0), - Recall: make(map[string]json.RawMessage, 0), - } - - return r -} diff --git a/typedapi/types/dataframeevaluationoutlierdetection.go b/typedapi/types/dataframeevaluationoutlierdetection.go index fc97f5bd09..88338de89c 100644 --- a/typedapi/types/dataframeevaluationoutlierdetection.go +++ b/typedapi/types/dataframeevaluationoutlierdetection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DataframeEvaluationOutlierDetection type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeEvaluation.ts#L46-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeEvaluation.ts#L46-L53 type DataframeEvaluationOutlierDetection struct { // ActualField The field of the index which contains the ground truth. The data type of this // field can be boolean or integer. If the data type is integer, the value has @@ -85,3 +85,13 @@ func NewDataframeEvaluationOutlierDetection() *DataframeEvaluationOutlierDetecti return r } + +// true + +type DataframeEvaluationOutlierDetectionVariant interface { + DataframeEvaluationOutlierDetectionCaster() *DataframeEvaluationOutlierDetection +} + +func (s *DataframeEvaluationOutlierDetection) DataframeEvaluationOutlierDetectionCaster() *DataframeEvaluationOutlierDetection { + return s +} diff --git a/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go b/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go index b4a6727b71..7f1f8580f6 100644 --- a/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go +++ b/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // DataframeEvaluationOutlierDetectionMetrics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeEvaluation.ts#L80-L83 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeEvaluation.ts#L80-L83 type DataframeEvaluationOutlierDetectionMetrics struct { // AucRoc The AUC ROC (area under the curve of the receiver operating characteristic) // score and optionally the curve. It is calculated for a specific class @@ -43,10 +43,20 @@ type DataframeEvaluationOutlierDetectionMetrics struct { // NewDataframeEvaluationOutlierDetectionMetrics returns a DataframeEvaluationOutlierDetectionMetrics. func NewDataframeEvaluationOutlierDetectionMetrics() *DataframeEvaluationOutlierDetectionMetrics { r := &DataframeEvaluationOutlierDetectionMetrics{ - ConfusionMatrix: make(map[string]json.RawMessage, 0), - Precision: make(map[string]json.RawMessage, 0), - Recall: make(map[string]json.RawMessage, 0), + ConfusionMatrix: make(map[string]json.RawMessage), + Precision: make(map[string]json.RawMessage), + Recall: make(map[string]json.RawMessage), } return r } + +// true + +type DataframeEvaluationOutlierDetectionMetricsVariant interface { + DataframeEvaluationOutlierDetectionMetricsCaster() *DataframeEvaluationOutlierDetectionMetrics +} + +func (s *DataframeEvaluationOutlierDetectionMetrics) DataframeEvaluationOutlierDetectionMetricsCaster() *DataframeEvaluationOutlierDetectionMetrics { + return s +} diff --git a/typedapi/types/dataframeevaluationregression.go b/typedapi/types/dataframeevaluationregression.go index 78d4bd8986..ec3bff69fd 100644 --- a/typedapi/types/dataframeevaluationregression.go +++ b/typedapi/types/dataframeevaluationregression.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DataframeEvaluationRegression type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeEvaluation.ts#L55-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeEvaluation.ts#L55-L62 type DataframeEvaluationRegression struct { // ActualField The field of the index which contains the ground truth. The data type of this // field must be numerical. @@ -85,3 +85,13 @@ func NewDataframeEvaluationRegression() *DataframeEvaluationRegression { return r } + +// true + +type DataframeEvaluationRegressionVariant interface { + DataframeEvaluationRegressionCaster() *DataframeEvaluationRegression +} + +func (s *DataframeEvaluationRegression) DataframeEvaluationRegressionCaster() *DataframeEvaluationRegression { + return s +} diff --git a/typedapi/types/dataframeevaluationregressionmetrics.go b/typedapi/types/dataframeevaluationregressionmetrics.go index 332f76fd22..312d0778e7 100644 --- a/typedapi/types/dataframeevaluationregressionmetrics.go +++ b/typedapi/types/dataframeevaluationregressionmetrics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // DataframeEvaluationRegressionMetrics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeEvaluation.ts#L92-L110 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeEvaluation.ts#L92-L110 type DataframeEvaluationRegressionMetrics struct { // Huber Pseudo Huber loss function. Huber *DataframeEvaluationRegressionMetricsHuber `json:"huber,omitempty"` @@ -44,9 +44,19 @@ type DataframeEvaluationRegressionMetrics struct { // NewDataframeEvaluationRegressionMetrics returns a DataframeEvaluationRegressionMetrics. func NewDataframeEvaluationRegressionMetrics() *DataframeEvaluationRegressionMetrics { r := &DataframeEvaluationRegressionMetrics{ - Mse: make(map[string]json.RawMessage, 0), - RSquared: make(map[string]json.RawMessage, 0), + Mse: make(map[string]json.RawMessage), + RSquared: make(map[string]json.RawMessage), } return r } + +// true + +type DataframeEvaluationRegressionMetricsVariant interface { + DataframeEvaluationRegressionMetricsCaster() *DataframeEvaluationRegressionMetrics +} + +func (s *DataframeEvaluationRegressionMetrics) DataframeEvaluationRegressionMetricsCaster() *DataframeEvaluationRegressionMetrics { + return s +} diff --git a/typedapi/types/dataframeevaluationregressionmetricshuber.go b/typedapi/types/dataframeevaluationregressionmetricshuber.go index 8b5f0a04a7..7c1a7e67d4 100644 --- a/typedapi/types/dataframeevaluationregressionmetricshuber.go +++ b/typedapi/types/dataframeevaluationregressionmetricshuber.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationRegressionMetricsHuber type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeEvaluation.ts#L117-L120 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeEvaluation.ts#L117-L120 type DataframeEvaluationRegressionMetricsHuber struct { // Delta Approximates 1/2 (prediction - actual)2 for values much less than delta and // approximates a straight line with slope delta for values much larger than @@ -81,3 +81,13 @@ func NewDataframeEvaluationRegressionMetricsHuber() *DataframeEvaluationRegressi return r } + +// true + +type DataframeEvaluationRegressionMetricsHuberVariant interface { + DataframeEvaluationRegressionMetricsHuberCaster() *DataframeEvaluationRegressionMetricsHuber +} + +func (s *DataframeEvaluationRegressionMetricsHuber) DataframeEvaluationRegressionMetricsHuberCaster() *DataframeEvaluationRegressionMetricsHuber { + return s +} diff --git a/typedapi/types/dataframeevaluationregressionmetricsmsle.go b/typedapi/types/dataframeevaluationregressionmetricsmsle.go index c28fefff6f..f92b9afa9b 100644 --- a/typedapi/types/dataframeevaluationregressionmetricsmsle.go +++ b/typedapi/types/dataframeevaluationregressionmetricsmsle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationRegressionMetricsMsle type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeEvaluation.ts#L112-L115 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeEvaluation.ts#L112-L115 type DataframeEvaluationRegressionMetricsMsle struct { // Offset Defines the transition point at which you switch from minimizing quadratic // error to minimizing quadratic log error. Defaults to 1. @@ -80,3 +80,13 @@ func NewDataframeEvaluationRegressionMetricsMsle() *DataframeEvaluationRegressio return r } + +// true + +type DataframeEvaluationRegressionMetricsMsleVariant interface { + DataframeEvaluationRegressionMetricsMsleCaster() *DataframeEvaluationRegressionMetricsMsle +} + +func (s *DataframeEvaluationRegressionMetricsMsle) DataframeEvaluationRegressionMetricsMsleCaster() *DataframeEvaluationRegressionMetricsMsle { + return s +} diff --git a/typedapi/types/dataframeevaluationsummaryaucroc.go b/typedapi/types/dataframeevaluationsummaryaucroc.go index e691ab10a6..08b68da136 100644 --- a/typedapi/types/dataframeevaluationsummaryaucroc.go +++ b/typedapi/types/dataframeevaluationsummaryaucroc.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationSummaryAucRoc type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L91-L93 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L91-L93 type DataframeEvaluationSummaryAucRoc struct { Curve []DataframeEvaluationSummaryAucRocCurveItem `json:"curve,omitempty"` Value Float64 `json:"value"` @@ -84,3 +84,5 @@ func NewDataframeEvaluationSummaryAucRoc() *DataframeEvaluationSummaryAucRoc { return r } + +// false diff --git a/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go b/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go index 47f2f089b0..7dca2eeba5 100644 --- a/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go +++ b/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationSummaryAucRocCurveItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L95-L99 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L95-L99 type DataframeEvaluationSummaryAucRocCurveItem struct { Fpr Float64 `json:"fpr"` Threshold Float64 `json:"threshold"` @@ -112,3 +112,5 @@ func NewDataframeEvaluationSummaryAucRocCurveItem() *DataframeEvaluationSummaryA return r } + +// false diff --git a/typedapi/types/dataframeevaluationvalue.go b/typedapi/types/dataframeevaluationvalue.go index a4d64bca66..a66a557391 100644 --- a/typedapi/types/dataframeevaluationvalue.go +++ b/typedapi/types/dataframeevaluationvalue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframeEvaluationValue type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L87-L89 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L87-L89 type DataframeEvaluationValue struct { Value Float64 `json:"value"` } @@ -78,3 +78,5 @@ func NewDataframeEvaluationValue() *DataframeEvaluationValue { return r } + +// false diff --git a/typedapi/types/dataframeoutlierdetectionsummary.go b/typedapi/types/dataframeoutlierdetectionsummary.go index 0b57568abc..747811ff6f 100644 --- a/typedapi/types/dataframeoutlierdetectionsummary.go +++ b/typedapi/types/dataframeoutlierdetectionsummary.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // DataframeOutlierDetectionSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L24-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L24-L42 type DataframeOutlierDetectionSummary struct { // AucRoc The AUC ROC (area under the curve of the receiver operating characteristic) // score and optionally the curve. @@ -42,10 +42,12 @@ type DataframeOutlierDetectionSummary struct { // NewDataframeOutlierDetectionSummary returns a DataframeOutlierDetectionSummary. func NewDataframeOutlierDetectionSummary() *DataframeOutlierDetectionSummary { r := &DataframeOutlierDetectionSummary{ - ConfusionMatrix: make(map[string]ConfusionMatrixThreshold, 0), - Precision: make(map[string]Float64, 0), - Recall: make(map[string]Float64, 0), + ConfusionMatrix: make(map[string]ConfusionMatrixThreshold), + Precision: make(map[string]Float64), + Recall: make(map[string]Float64), } return r } + +// false diff --git a/typedapi/types/dataframepreviewconfig.go b/typedapi/types/dataframepreviewconfig.go index 9043946ce2..75f0c9fb4a 100644 --- a/typedapi/types/dataframepreviewconfig.go +++ b/typedapi/types/dataframepreviewconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataframePreviewConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/preview_data_frame_analytics/types.ts#L27-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/preview_data_frame_analytics/types.ts#L27-L33 type DataframePreviewConfig struct { Analysis DataframeAnalysisContainer `json:"analysis"` AnalyzedFields *DataframeAnalysisAnalyzedFields `json:"analyzed_fields,omitempty"` @@ -109,3 +109,13 @@ func NewDataframePreviewConfig() *DataframePreviewConfig { return r } + +// true + +type DataframePreviewConfigVariant interface { + DataframePreviewConfigCaster() *DataframePreviewConfig +} + +func (s *DataframePreviewConfig) DataframePreviewConfigCaster() *DataframePreviewConfig { + return s +} diff --git a/typedapi/types/dataframeregressionsummary.go b/typedapi/types/dataframeregressionsummary.go index f93850c881..f264ea48fc 100644 --- a/typedapi/types/dataframeregressionsummary.go +++ b/typedapi/types/dataframeregressionsummary.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // DataframeRegressionSummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/evaluate_data_frame/types.ts#L68-L85 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/evaluate_data_frame/types.ts#L68-L85 type DataframeRegressionSummary struct { // Huber Pseudo Huber loss function. Huber *DataframeEvaluationValue `json:"huber,omitempty"` @@ -43,3 +43,5 @@ func NewDataframeRegressionSummary() *DataframeRegressionSummary { return r } + +// false diff --git a/typedapi/types/datapathstats.go b/typedapi/types/datapathstats.go index df2580a20b..367aafd9ea 100644 --- a/typedapi/types/datapathstats.go +++ b/typedapi/types/datapathstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataPathStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L550-L594 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L586-L630 type DataPathStats struct { // Available Total amount of disk space available to this Java virtual machine on this // file store. @@ -301,3 +301,5 @@ func NewDataPathStats() *DataPathStats { return r } + +// false diff --git a/typedapi/types/datastream.go b/typedapi/types/datastream.go index a1cccd80fc..3cdb64f1fe 100644 --- a/typedapi/types/datastream.go +++ b/typedapi/types/datastream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,10 +34,12 @@ import ( // DataStream type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/DataStream.ts#L39-L112 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/DataStream.ts#L45-L127 type DataStream struct { // AllowCustomRouting If `true`, the data stream allows custom routing on write request. AllowCustomRouting *bool `json:"allow_custom_routing,omitempty"` + // FailureStore Information about failure store backing indices + FailureStore *FailureStore `json:"failure_store,omitempty"` // Generation Current generation for the data stream. This number acts as a cumulative // count of the stream’s rollovers, starting at 1. Generation int `json:"generation"` @@ -75,6 +77,10 @@ type DataStream struct { // replication and the local cluster can not write into this data stream or // change its mappings. Replicated *bool `json:"replicated,omitempty"` + // RolloverOnWrite If `true`, the next write to this data stream will trigger a rollover first + // and the document will be indexed in the new backing index. If the rollover + // fails the indexing request will fail too. + RolloverOnWrite bool `json:"rollover_on_write"` // Status Health status of the data stream. // This health status is based on the state of the primary and replica shards of // the stream’s backing indices. 
@@ -118,6 +124,11 @@ func (s *DataStream) UnmarshalJSON(data []byte) error { s.AllowCustomRouting = &v } + case "failure_store": + if err := dec.Decode(&s.FailureStore); err != nil { + return fmt.Errorf("%s | %w", "FailureStore", err) + } + case "generation": var tmp any @@ -206,6 +217,20 @@ func (s *DataStream) UnmarshalJSON(data []byte) error { s.Replicated = &v } + case "rollover_on_write": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RolloverOnWrite", err) + } + s.RolloverOnWrite = value + case bool: + s.RolloverOnWrite = v + } + case "status": if err := dec.Decode(&s.Status); err != nil { return fmt.Errorf("%s | %w", "Status", err) @@ -246,3 +271,5 @@ func NewDataStream() *DataStream { return r } + +// false diff --git a/typedapi/types/datastreamindex.go b/typedapi/types/datastreamindex.go index 139ad07d72..ea74635cb5 100644 --- a/typedapi/types/datastreamindex.go +++ b/typedapi/types/datastreamindex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // DataStreamIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/DataStream.ts#L121-L142 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/DataStream.ts#L136-L157 type DataStreamIndex struct { // IlmPolicy Name of the current ILM lifecycle policy configured for this backing index. 
IlmPolicy *string `json:"ilm_policy,omitempty"` @@ -42,10 +42,10 @@ type DataStreamIndex struct { // IndexUuid Universally unique identifier (UUID) for the index. IndexUuid string `json:"index_uuid"` // ManagedBy Name of the lifecycle system that's currently managing this backing index. - ManagedBy managedby.ManagedBy `json:"managed_by"` + ManagedBy *managedby.ManagedBy `json:"managed_by,omitempty"` // PreferIlm Indicates if ILM should take precedence over DSL in case both are configured // to manage this index. - PreferIlm bool `json:"prefer_ilm"` + PreferIlm *bool `json:"prefer_ilm,omitempty"` } func (s *DataStreamIndex) UnmarshalJSON(data []byte) error { @@ -92,9 +92,9 @@ func (s *DataStreamIndex) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "PreferIlm", err) } - s.PreferIlm = value + s.PreferIlm = &value case bool: - s.PreferIlm = v + s.PreferIlm = &v } } @@ -108,3 +108,5 @@ func NewDataStreamIndex() *DataStreamIndex { return r } + +// false diff --git a/typedapi/types/datastreamlifecycle.go b/typedapi/types/datastreamlifecycle.go index 29b6ea84dd..201f8e7573 100644 --- a/typedapi/types/datastreamlifecycle.go +++ b/typedapi/types/datastreamlifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,14 +26,25 @@ import ( "errors" "fmt" "io" + "strconv" ) // DataStreamLifecycle type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/DataStreamLifecycle.ts#L25-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/DataStreamLifecycle.ts#L25-L45 type DataStreamLifecycle struct { - DataRetention Duration `json:"data_retention,omitempty"` - Downsampling *DataStreamLifecycleDownsampling `json:"downsampling,omitempty"` + // DataRetention If defined, every document added to this data stream will be stored at least + // for this time frame. + // Any time after this duration the document could be deleted. + // When empty, every document in this data stream will be stored indefinitely. + DataRetention Duration `json:"data_retention,omitempty"` + // Downsampling The downsampling configuration to execute for the managed backing index after + // rollover. + Downsampling *DataStreamLifecycleDownsampling `json:"downsampling,omitempty"` + // Enabled If defined, it turns data stream lifecycle on/off (`true`/`false`) for this + // data stream. A data stream lifecycle + // that's disabled (enabled: `false`) will have no effect on the data stream. 
+ Enabled *bool `json:"enabled,omitempty"` } func (s *DataStreamLifecycle) UnmarshalJSON(data []byte) error { @@ -61,6 +72,20 @@ func (s *DataStreamLifecycle) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Downsampling", err) } + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + } } return nil @@ -72,3 +97,13 @@ func NewDataStreamLifecycle() *DataStreamLifecycle { return r } + +// true + +type DataStreamLifecycleVariant interface { + DataStreamLifecycleCaster() *DataStreamLifecycle +} + +func (s *DataStreamLifecycle) DataStreamLifecycleCaster() *DataStreamLifecycle { + return s +} diff --git a/typedapi/types/datastreamlifecycledetails.go b/typedapi/types/datastreamlifecycledetails.go new file mode 100644 index 0000000000..250e7fd167 --- /dev/null +++ b/typedapi/types/datastreamlifecycledetails.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreamLifecycleDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L152-L156 +type DataStreamLifecycleDetails struct { + StagnatingBackingIndices []StagnatingBackingIndices `json:"stagnating_backing_indices,omitempty"` + StagnatingBackingIndicesCount int `json:"stagnating_backing_indices_count"` + TotalBackingIndicesInError int `json:"total_backing_indices_in_error"` +} + +func (s *DataStreamLifecycleDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stagnating_backing_indices": + if err := dec.Decode(&s.StagnatingBackingIndices); err != nil { + return fmt.Errorf("%s | %w", "StagnatingBackingIndices", err) + } + + case "stagnating_backing_indices_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "StagnatingBackingIndicesCount", err) + } + s.StagnatingBackingIndicesCount = value + case float64: + f := int(v) + s.StagnatingBackingIndicesCount = f + } + + case "total_backing_indices_in_error": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalBackingIndicesInError", err) + } + s.TotalBackingIndicesInError = value + case float64: + f := int(v) + s.TotalBackingIndicesInError = f + } + + } + } + return nil +} + +// NewDataStreamLifecycleDetails returns a DataStreamLifecycleDetails. 
+func NewDataStreamLifecycleDetails() *DataStreamLifecycleDetails { + r := &DataStreamLifecycleDetails{} + + return r +} + +// false diff --git a/typedapi/types/datastreamlifecycledownsampling.go b/typedapi/types/datastreamlifecycledownsampling.go index 4d44a93e3d..92eecc125e 100644 --- a/typedapi/types/datastreamlifecycledownsampling.go +++ b/typedapi/types/datastreamlifecycledownsampling.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // DataStreamLifecycleDownsampling type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/DataStreamLifecycleDownsampling.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/DataStreamLifecycleDownsampling.ts#L22-L27 type DataStreamLifecycleDownsampling struct { // Rounds The list of downsampling rounds to execute as part of this downsampling // configuration @@ -35,3 +35,13 @@ func NewDataStreamLifecycleDownsampling() *DataStreamLifecycleDownsampling { return r } + +// true + +type DataStreamLifecycleDownsamplingVariant interface { + DataStreamLifecycleDownsamplingCaster() *DataStreamLifecycleDownsampling +} + +func (s *DataStreamLifecycleDownsampling) DataStreamLifecycleDownsamplingCaster() *DataStreamLifecycleDownsampling { + return s +} diff --git a/typedapi/types/datastreamlifecycleexplain.go b/typedapi/types/datastreamlifecycleexplain.go index da55752fac..4684e56336 100644 --- a/typedapi/types/datastreamlifecycleexplain.go +++ b/typedapi/types/datastreamlifecycleexplain.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataStreamLifecycleExplain type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L31-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L31-L41 type DataStreamLifecycleExplain struct { Error *string `json:"error,omitempty"` GenerationTime Duration `json:"generation_time,omitempty"` @@ -131,3 +131,5 @@ func NewDataStreamLifecycleExplain() *DataStreamLifecycleExplain { return r } + +// false diff --git a/typedapi/types/baseindicator.go b/typedapi/types/datastreamlifecycleindicator.go similarity index 72% rename from typedapi/types/baseindicator.go rename to typedapi/types/datastreamlifecycleindicator.go index 8864dbbd50..e8b049d392 100644 --- a/typedapi/types/baseindicator.go +++ b/typedapi/types/datastreamlifecycleindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,17 +31,18 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus" ) -// BaseIndicator type. +// DataStreamLifecycleIndicator type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L42-L47 -type BaseIndicator struct { +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L147-L151 +type DataStreamLifecycleIndicator struct { + Details *DataStreamLifecycleDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` Impacts []Impact `json:"impacts,omitempty"` Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` Symptom string `json:"symptom"` } -func (s *BaseIndicator) UnmarshalJSON(data []byte) error { +func (s *DataStreamLifecycleIndicator) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -56,6 +57,11 @@ func (s *BaseIndicator) UnmarshalJSON(data []byte) error { switch t { + case "details": + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + case "diagnosis": if err := dec.Decode(&s.Diagnosis); err != nil { return fmt.Errorf("%s | %w", "Diagnosis", err) @@ -88,9 +94,11 @@ func (s *BaseIndicator) UnmarshalJSON(data []byte) error { return nil } -// NewBaseIndicator returns a BaseIndicator. -func NewBaseIndicator() *BaseIndicator { - r := &BaseIndicator{} +// NewDataStreamLifecycleIndicator returns a DataStreamLifecycleIndicator. +func NewDataStreamLifecycleIndicator() *DataStreamLifecycleIndicator { + r := &DataStreamLifecycleIndicator{} return r } + +// false diff --git a/typedapi/types/datastreamlifecyclerolloverconditions.go b/typedapi/types/datastreamlifecyclerolloverconditions.go index dcaf4a344d..733fd46935 100644 --- a/typedapi/types/datastreamlifecyclerolloverconditions.go +++ b/typedapi/types/datastreamlifecyclerolloverconditions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataStreamLifecycleRolloverConditions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/DataStreamLifecycle.ts#L57-L69 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/DataStreamLifecycle.ts#L60-L72 type DataStreamLifecycleRolloverConditions struct { MaxAge *string `json:"max_age,omitempty"` MaxDocs *int64 `json:"max_docs,omitempty"` @@ -168,3 +168,13 @@ func NewDataStreamLifecycleRolloverConditions() *DataStreamLifecycleRolloverCond return r } + +// true + +type DataStreamLifecycleRolloverConditionsVariant interface { + DataStreamLifecycleRolloverConditionsCaster() *DataStreamLifecycleRolloverConditions +} + +func (s *DataStreamLifecycleRolloverConditions) DataStreamLifecycleRolloverConditionsCaster() *DataStreamLifecycleRolloverConditions { + return s +} diff --git a/typedapi/types/datastreamlifecyclewithrollover.go b/typedapi/types/datastreamlifecyclewithrollover.go index 249304c743..3239450753 100644 --- a/typedapi/types/datastreamlifecyclewithrollover.go +++ b/typedapi/types/datastreamlifecyclewithrollover.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,11 +26,12 @@ import ( "errors" "fmt" "io" + "strconv" ) // DataStreamLifecycleWithRollover type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/DataStreamLifecycle.ts#L33-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/DataStreamLifecycle.ts#L47-L58 type DataStreamLifecycleWithRollover struct { // DataRetention If defined, every document added to this data stream will be stored at least // for this time frame. @@ -40,6 +41,10 @@ type DataStreamLifecycleWithRollover struct { // Downsampling The downsampling configuration to execute for the managed backing index after // rollover. Downsampling *DataStreamLifecycleDownsampling `json:"downsampling,omitempty"` + // Enabled If defined, it turns data stream lifecycle on/off (`true`/`false`) for this + // data stream. A data stream lifecycle + // that's disabled (enabled: `false`) will have no effect on the data stream. + Enabled *bool `json:"enabled,omitempty"` // Rollover The conditions which will trigger the rollover of a backing index as // configured by the cluster setting `cluster.lifecycle.default.rollover`. 
// This property is an implementation detail and it will only be retrieved when @@ -73,6 +78,20 @@ func (s *DataStreamLifecycleWithRollover) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Downsampling", err) } + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + case "rollover": if err := dec.Decode(&s.Rollover); err != nil { return fmt.Errorf("%s | %w", "Rollover", err) @@ -89,3 +108,13 @@ func NewDataStreamLifecycleWithRollover() *DataStreamLifecycleWithRollover { return r } + +// true + +type DataStreamLifecycleWithRolloverVariant interface { + DataStreamLifecycleWithRolloverCaster() *DataStreamLifecycleWithRollover +} + +func (s *DataStreamLifecycleWithRollover) DataStreamLifecycleWithRolloverCaster() *DataStreamLifecycleWithRollover { + return s +} diff --git a/typedapi/types/datastreamnames.go b/typedapi/types/datastreamnames.go index c80c357af7..337daa397b 100644 --- a/typedapi/types/datastreamnames.go +++ b/typedapi/types/datastreamnames.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // DataStreamNames type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L94-L94 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L95-L95 type DataStreamNames []string diff --git a/typedapi/types/datastreams.go b/typedapi/types/datastreams.go index 7e5a7eacda..00a0c90aac 100644 --- a/typedapi/types/datastreams.go +++ b/typedapi/types/datastreams.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataStreams type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L81-L84 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L78-L81 type DataStreams struct { Available bool `json:"available"` DataStreams int64 `json:"data_streams"` @@ -123,3 +123,5 @@ func NewDataStreams() *DataStreams { return r } + +// false diff --git a/typedapi/types/datastreamsstatsitem.go b/typedapi/types/datastreamsstatsitem.go index b899f895b2..7914559569 100644 --- a/typedapi/types/datastreamsstatsitem.go +++ b/typedapi/types/datastreamsstatsitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataStreamsStatsItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L45-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L45-L65 type DataStreamsStatsItem struct { // BackingIndices Current number of backing indices for the data stream. BackingIndices int `json:"backing_indices"` @@ -124,3 +124,5 @@ func NewDataStreamsStatsItem() *DataStreamsStatsItem { return r } + +// false diff --git a/typedapi/types/datastreamstats.go b/typedapi/types/datastreamstats.go new file mode 100644 index 0000000000..a2cadfdc0c --- /dev/null +++ b/typedapi/types/datastreamstats.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreamStats type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsResponse.ts#L46-L59 +type DataStreamStats struct { + // BackingIndicesInError The count of the backing indices for the data stream. + BackingIndicesInError int `json:"backing_indices_in_error"` + // BackingIndicesInTotal The count of the backing indices for the data stream that have encountered an + // error. + BackingIndicesInTotal int `json:"backing_indices_in_total"` + // Name The name of the data stream. + Name string `json:"name"` +} + +func (s *DataStreamStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "backing_indices_in_error": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "BackingIndicesInError", err) + } + s.BackingIndicesInError = value + case float64: + f := int(v) + s.BackingIndicesInError = f + } + + case "backing_indices_in_total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "BackingIndicesInTotal", err) + } + s.BackingIndicesInTotal = value + case float64: + f := int(v) + s.BackingIndicesInTotal = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewDataStreamStats returns a DataStreamStats. 
+func NewDataStreamStats() *DataStreamStats { + r := &DataStreamStats{} + + return r +} + +// false diff --git a/typedapi/types/datastreamtimestamp.go b/typedapi/types/datastreamtimestamp.go index 96d0dc6199..cf6b0ccdd0 100644 --- a/typedapi/types/datastreamtimestamp.go +++ b/typedapi/types/datastreamtimestamp.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataStreamTimestamp type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/TypeMapping.ts#L59-L61 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/TypeMapping.ts#L59-L61 type DataStreamTimestamp struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,13 @@ func NewDataStreamTimestamp() *DataStreamTimestamp { return r } + +// true + +type DataStreamTimestampVariant interface { + DataStreamTimestampCaster() *DataStreamTimestamp +} + +func (s *DataStreamTimestamp) DataStreamTimestampCaster() *DataStreamTimestamp { + return s +} diff --git a/typedapi/types/datastreamtimestampfield.go b/typedapi/types/datastreamtimestampfield.go index 3706f07197..9e84058550 100644 --- a/typedapi/types/datastreamtimestampfield.go +++ b/typedapi/types/datastreamtimestampfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DataStreamTimestampField type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/DataStream.ts#L114-L119 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/DataStream.ts#L129-L134 type DataStreamTimestampField struct { // Name Name of the timestamp field for the data stream, which must be `@timestamp`. // The `@timestamp` field must be included in every document indexed to the data @@ -69,3 +69,5 @@ func NewDataStreamTimestampField() *DataStreamTimestampField { return r } + +// false diff --git a/typedapi/types/datastreamvisibility.go b/typedapi/types/datastreamvisibility.go index 85dcc35370..e8e5e8a3a8 100644 --- a/typedapi/types/datastreamvisibility.go +++ b/typedapi/types/datastreamvisibility.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,9 +31,10 @@ import ( // DataStreamVisibility type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/DataStream.ts#L144-L146 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/DataStream.ts#L159-L162 type DataStreamVisibility struct { - Hidden *bool `json:"hidden,omitempty"` + AllowCustomRouting *bool `json:"allow_custom_routing,omitempty"` + Hidden *bool `json:"hidden,omitempty"` } func (s *DataStreamVisibility) UnmarshalJSON(data []byte) error { @@ -51,6 +52,20 @@ func (s *DataStreamVisibility) UnmarshalJSON(data []byte) error { switch t { + case "allow_custom_routing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowCustomRouting", err) + } + s.AllowCustomRouting = &value + case bool: + s.AllowCustomRouting = &v + } + case "hidden": var tmp any dec.Decode(&tmp) @@ -76,3 +91,13 @@ func NewDataStreamVisibility() *DataStreamVisibility { return r } + +// true + +type DataStreamVisibilityVariant interface { + DataStreamVisibilityCaster() *DataStreamVisibility +} + +func (s *DataStreamVisibility) DataStreamVisibilityCaster() *DataStreamVisibility { + return s +} diff --git a/typedapi/types/datastreamwithlifecycle.go b/typedapi/types/datastreamwithlifecycle.go index d309fe5143..0d5299d696 100644 --- a/typedapi/types/datastreamwithlifecycle.go +++ b/typedapi/types/datastreamwithlifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,10 +30,10 @@ import ( // DataStreamWithLifecycle type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L27-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L27-L30 type DataStreamWithLifecycle struct { - Lifecycle *DataStreamLifecycle `json:"lifecycle,omitempty"` - Name string `json:"name"` + Lifecycle *DataStreamLifecycleWithRollover `json:"lifecycle,omitempty"` + Name string `json:"name"` } func (s *DataStreamWithLifecycle) UnmarshalJSON(data []byte) error { @@ -72,3 +72,5 @@ func NewDataStreamWithLifecycle() *DataStreamWithLifecycle { return r } + +// false diff --git a/typedapi/types/datatierphasestatistics.go b/typedapi/types/datatierphasestatistics.go index d14aff27b9..ba843d572a 100644 --- a/typedapi/types/datatierphasestatistics.go +++ b/typedapi/types/datatierphasestatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataTierPhaseStatistics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L86-L97 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L83-L94 type DataTierPhaseStatistics struct { DocCount int64 `json:"doc_count"` IndexCount int64 `json:"index_count"` @@ -221,3 +221,5 @@ func NewDataTierPhaseStatistics() *DataTierPhaseStatistics { return r } + +// false diff --git a/typedapi/types/datatiers.go b/typedapi/types/datatiers.go index 4d01071d98..07771f7aa4 100644 --- a/typedapi/types/datatiers.go +++ b/typedapi/types/datatiers.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DataTiers type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L339-L349 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L349-L359 type DataTiers struct { Available bool `json:"available"` DataCold DataTierPhaseStatistics `json:"data_cold"` @@ -121,3 +121,5 @@ func NewDataTiers() *DataTiers { return r } + +// false diff --git a/typedapi/types/datedecayfunction.go b/typedapi/types/datedecayfunction.go index 9f4db28390..1176914434 100644 --- a/typedapi/types/datedecayfunction.go +++ b/typedapi/types/datedecayfunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,7 +29,7 @@ import ( // DateDecayFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L196-L196 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L209-L209 type DateDecayFunction struct { DecayFunctionBaseDateMathDuration map[string]DecayPlacementDateMathDuration `json:"-"` // MultiValueMode Determines how the distance is calculated when a field used for computing the @@ -69,8 +69,18 @@ func (s DateDecayFunction) MarshalJSON() ([]byte, error) { // NewDateDecayFunction returns a DateDecayFunction. func NewDateDecayFunction() *DateDecayFunction { r := &DateDecayFunction{ - DecayFunctionBaseDateMathDuration: make(map[string]DecayPlacementDateMathDuration, 0), + DecayFunctionBaseDateMathDuration: make(map[string]DecayPlacementDateMathDuration), } return r } + +// true + +type DateDecayFunctionVariant interface { + DateDecayFunctionCaster() *DateDecayFunction +} + +func (s *DateDecayFunction) DateDecayFunctionCaster() *DateDecayFunction { + return s +} diff --git a/typedapi/types/datedistancefeaturequery.go b/typedapi/types/datedistancefeaturequery.go index 9a7a7d46b0..97918b907a 100644 --- a/typedapi/types/datedistancefeaturequery.go +++ b/typedapi/types/datedistancefeaturequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DateDistanceFeatureQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L72-L75 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L71-L74 type DateDistanceFeatureQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -131,3 +131,13 @@ func NewDateDistanceFeatureQuery() *DateDistanceFeatureQuery { return r } + +// true + +type DateDistanceFeatureQueryVariant interface { + DateDistanceFeatureQueryCaster() *DateDistanceFeatureQuery +} + +func (s *DateDistanceFeatureQuery) DateDistanceFeatureQueryCaster() *DateDistanceFeatureQuery { + return s +} diff --git a/typedapi/types/datehistogramaggregate.go b/typedapi/types/datehistogramaggregate.go index e14bc8b0c6..923a64ce21 100644 --- a/typedapi/types/datehistogramaggregate.go +++ b/typedapi/types/datehistogramaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DateHistogramAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L350-L351 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L383-L386 type DateHistogramAggregate struct { Buckets BucketsDateHistogramBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewDateHistogramAggregate() *DateHistogramAggregate { return r } + +// false diff --git a/typedapi/types/datehistogramaggregation.go b/typedapi/types/datehistogramaggregation.go index 4e3b580faa..c6f2432aad 100644 --- a/typedapi/types/datehistogramaggregation.go +++ b/typedapi/types/datehistogramaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // DateHistogramAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L191-L249 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L202-L260 type DateHistogramAggregation struct { // CalendarInterval Calendar-aware interval. // Can be specified using the unit name, such as `month`, or as a single unit @@ -220,8 +220,18 @@ func (s *DateHistogramAggregation) UnmarshalJSON(data []byte) error { // NewDateHistogramAggregation returns a DateHistogramAggregation. 
func NewDateHistogramAggregation() *DateHistogramAggregation { r := &DateHistogramAggregation{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type DateHistogramAggregationVariant interface { + DateHistogramAggregationCaster() *DateHistogramAggregation +} + +func (s *DateHistogramAggregation) DateHistogramAggregationCaster() *DateHistogramAggregation { + return s +} diff --git a/typedapi/types/datehistogrambucket.go b/typedapi/types/datehistogrambucket.go index d839342ca3..bc161680e7 100644 --- a/typedapi/types/datehistogrambucket.go +++ b/typedapi/types/datehistogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // DateHistogramBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L353-L356 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L388-L391 type DateHistogramBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -504,6 +504,13 @@ func (s *DateHistogramBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -637,8 +644,10 @@ func (s DateHistogramBucket) MarshalJSON() ([]byte, error) { // NewDateHistogramBucket returns a DateHistogramBucket. func NewDateHistogramBucket() *DateHistogramBucket { r := &DateHistogramBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/datehistogramgrouping.go b/typedapi/types/datehistogramgrouping.go index 74e3c30990..7659a9c125 100644 --- a/typedapi/types/datehistogramgrouping.go +++ b/typedapi/types/datehistogramgrouping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DateHistogramGrouping type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/_types/Groupings.ts#L42-L73 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/_types/Groupings.ts#L42-L73 type DateHistogramGrouping struct { // CalendarInterval The interval of time buckets to be generated when rolling up. CalendarInterval Duration `json:"calendar_interval,omitempty"` @@ -124,3 +124,13 @@ func NewDateHistogramGrouping() *DateHistogramGrouping { return r } + +// true + +type DateHistogramGroupingVariant interface { + DateHistogramGroupingCaster() *DateHistogramGrouping +} + +func (s *DateHistogramGrouping) DateHistogramGroupingCaster() *DateHistogramGrouping { + return s +} diff --git a/typedapi/types/dateindexnameprocessor.go b/typedapi/types/dateindexnameprocessor.go index af5b4ee9c3..b7cf6be14a 100644 --- a/typedapi/types/dateindexnameprocessor.go +++ b/typedapi/types/dateindexnameprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DateIndexNameProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L502-L540 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L729-L767 type DateIndexNameProcessor struct { // DateFormats An array of the expected date formats for parsing dates / timestamps in the // document being preprocessed. 
@@ -224,3 +224,13 @@ func NewDateIndexNameProcessor() *DateIndexNameProcessor { return r } + +// true + +type DateIndexNameProcessorVariant interface { + DateIndexNameProcessorCaster() *DateIndexNameProcessor +} + +func (s *DateIndexNameProcessor) DateIndexNameProcessorCaster() *DateIndexNameProcessor { + return s +} diff --git a/typedapi/types/datenanosproperty.go b/typedapi/types/datenanosproperty.go index 293d9bcd0f..6beaca84e3 100644 --- a/typedapi/types/datenanosproperty.go +++ b/typedapi/types/datenanosproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,13 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // DateNanosProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L79-L87 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L80-L90 type DateNanosProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -45,13 +47,15 @@ type DateNanosProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - NullValue DateTime `json:"null_value,omitempty"` - PrecisionStep *int `json:"precision_step,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue DateTime `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + PrecisionStep *int `json:"precision_step,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { @@ -139,301 +143,313 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -508,6 +524,11 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "NullValue", err) } + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", 
"OnScriptError", err) + } + case "precision_step": var tmp any @@ -543,317 +564,322 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] 
= oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) } - s.Similarity = &o case "store": var tmp any @@ -869,6 +895,11 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != 
nil { return fmt.Errorf("%s | %w", "Type", err) @@ -883,22 +914,24 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { func (s DateNanosProperty) MarshalJSON() ([]byte, error) { type innerDateNanosProperty DateNanosProperty tmp := innerDateNanosProperty{ - Boost: s.Boost, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - Format: s.Format, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - Index: s.Index, - Meta: s.Meta, - NullValue: s.NullValue, - PrecisionStep: s.PrecisionStep, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + PrecisionStep: s.PrecisionStep, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "date_nanos" @@ -909,10 +942,20 @@ func (s DateNanosProperty) MarshalJSON() ([]byte, error) { // NewDateNanosProperty returns a DateNanosProperty. 
func NewDateNanosProperty() *DateNanosProperty { r := &DateNanosProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DateNanosPropertyVariant interface { + DateNanosPropertyCaster() *DateNanosProperty +} + +func (s *DateNanosProperty) DateNanosPropertyCaster() *DateNanosProperty { + return s +} diff --git a/typedapi/types/dateprocessor.go b/typedapi/types/dateprocessor.go index ece4a4b6fe..b3b8b4523d 100644 --- a/typedapi/types/dateprocessor.go +++ b/typedapi/types/dateprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DateProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L542-L569 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L769-L802 type DateProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -52,6 +52,9 @@ type DateProcessor struct { Locale *string `json:"locale,omitempty"` // OnFailure Handle failures for the processor. OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // OutputFormat The format to use when writing the date to target_field. Must be a valid + // java time pattern. + OutputFormat *string `json:"output_format,omitempty"` // Tag Identifier for the processor. // Useful for debugging and metrics. 
Tag *string `json:"tag,omitempty"` @@ -142,6 +145,18 @@ func (s *DateProcessor) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "OnFailure", err) } + case "output_format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "OutputFormat", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OutputFormat = &o + case "tag": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -182,3 +197,13 @@ func NewDateProcessor() *DateProcessor { return r } + +// true + +type DateProcessorVariant interface { + DateProcessorCaster() *DateProcessor +} + +func (s *DateProcessor) DateProcessorCaster() *DateProcessor { + return s +} diff --git a/typedapi/types/dateproperty.go b/typedapi/types/dateproperty.go index 8bea067ec8..c086f9e5de 100644 --- a/typedapi/types/dateproperty.go +++ b/typedapi/types/dateproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,13 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // DateProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L67-L77 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L66-L78 type DateProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -47,13 +49,15 @@ type DateProperty struct { Index *bool `json:"index,omitempty"` Locale *string `json:"locale,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue DateTime `json:"null_value,omitempty"` - PrecisionStep *int `json:"precision_step,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue DateTime `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + PrecisionStep *int `json:"precision_step,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *DateProperty) UnmarshalJSON(data []byte) error { @@ -146,301 +150,313 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil 
{ - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := 
NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": 
oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -527,6 +543,11 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "NullValue", err) } + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + case "precision_step": var tmp any @@ -562,317 +583,322 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { 
- return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := 
NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, 
err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) } - s.Similarity = &o case "store": var tmp any @@ -888,6 +914,11 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -902,24 +933,26 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { func (s DateProperty) MarshalJSON() ([]byte, error) { type innerDateProperty DateProperty tmp := innerDateProperty{ - Boost: s.Boost, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fielddata: s.Fielddata, - Fields: s.Fields, - Format: s.Format, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - Index: s.Index, - Locale: s.Locale, - Meta: s.Meta, - NullValue: s.NullValue, - PrecisionStep: s.PrecisionStep, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fielddata: s.Fielddata, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Locale: s.Locale, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + PrecisionStep: s.PrecisionStep, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "date" @@ -930,10 +963,20 @@ func (s DateProperty) MarshalJSON() ([]byte, error) { // NewDateProperty returns a DateProperty. 
func NewDateProperty() *DateProperty { r := &DateProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DatePropertyVariant interface { + DatePropertyCaster() *DateProperty +} + +func (s *DateProperty) DatePropertyCaster() *DateProperty { + return s +} diff --git a/typedapi/types/daterangeaggregate.go b/typedapi/types/daterangeaggregate.go index c4f40ca8ec..46d40bc38b 100644 --- a/typedapi/types/daterangeaggregate.go +++ b/typedapi/types/daterangeaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DateRangeAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L547-L552 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L609-L615 type DateRangeAggregate struct { Buckets BucketsRangeBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewDateRangeAggregate() *DateRangeAggregate { return r } + +// false diff --git a/typedapi/types/daterangeaggregation.go b/typedapi/types/daterangeaggregation.go index 200c7baa48..1788ce892c 100644 --- a/typedapi/types/daterangeaggregation.go +++ b/typedapi/types/daterangeaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DateRangeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L270-L296 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L281-L307 type DateRangeAggregation struct { // Field The date field whose values are use to build ranges. Field *string `json:"field,omitempty"` @@ -121,3 +121,13 @@ func NewDateRangeAggregation() *DateRangeAggregation { return r } + +// true + +type DateRangeAggregationVariant interface { + DateRangeAggregationCaster() *DateRangeAggregation +} + +func (s *DateRangeAggregation) DateRangeAggregationCaster() *DateRangeAggregation { + return s +} diff --git a/typedapi/types/daterangeexpression.go b/typedapi/types/daterangeexpression.go index 67af5c2cc0..ff140a84b9 100644 --- a/typedapi/types/daterangeexpression.go +++ b/typedapi/types/daterangeexpression.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DateRangeExpression type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L307-L320 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L318-L331 type DateRangeExpression struct { // From Start of the range (inclusive). From FieldDateMath `json:"from,omitempty"` @@ -89,3 +89,13 @@ func NewDateRangeExpression() *DateRangeExpression { return r } + +// true + +type DateRangeExpressionVariant interface { + DateRangeExpressionCaster() *DateRangeExpression +} + +func (s *DateRangeExpression) DateRangeExpressionCaster() *DateRangeExpression { + return s +} diff --git a/typedapi/types/daterangeproperty.go b/typedapi/types/daterangeproperty.go index 3cdfbeb794..6cb1aa3765 100644 --- a/typedapi/types/daterangeproperty.go +++ b/typedapi/types/daterangeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // DateRangeProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/range.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/range.ts#L29-L32 type DateRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -45,11 +46,11 @@ type DateRangeProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { @@ -151,301 +152,313 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo 
:= NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } 
s.Fields[key] = oo } @@ -520,318 +533,318 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", 
err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := 
NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -846,6 +859,11 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -860,20 +878,20 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { func (s 
DateRangeProperty) MarshalJSON() ([]byte, error) { type innerDateRangeProperty DateRangeProperty tmp := innerDateRangeProperty{ - Boost: s.Boost, - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - Format: s.Format, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "date_range" @@ -884,10 +902,20 @@ func (s DateRangeProperty) MarshalJSON() ([]byte, error) { // NewDateRangeProperty returns a DateRangeProperty. func NewDateRangeProperty() *DateRangeProperty { r := &DateRangeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DateRangePropertyVariant interface { + DateRangePropertyCaster() *DateRangeProperty +} + +func (s *DateRangeProperty) DateRangePropertyCaster() *DateRangeProperty { + return s +} diff --git a/typedapi/types/daterangequery.go b/typedapi/types/daterangequery.go index b4c16e2fb1..dec94698f6 100644 --- a/typedapi/types/daterangequery.go +++ b/typedapi/types/daterangequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // DateRangeQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L146-L155 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L161-L170 type DateRangeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -160,3 +160,13 @@ func NewDateRangeQuery() *DateRangeQuery { return r } + +// true + +type DateRangeQueryVariant interface { + DateRangeQueryCaster() *DateRangeQuery +} + +func (s *DateRangeQuery) DateRangeQueryCaster() *DateRangeQuery { + return s +} diff --git a/typedapi/types/datetime.go b/typedapi/types/datetime.go index 418927d1db..174e4efae3 100644 --- a/typedapi/types/datetime.go +++ b/typedapi/types/datetime.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // int64 // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Time.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Time.ts#L22-L27 type DateTime any + +type DateTimeVariant interface { + DateTimeCaster() *DateTime +} diff --git a/typedapi/types/decayfunction.go b/typedapi/types/decayfunction.go index a1402209ad..fee1f48725 100644 --- a/typedapi/types/decayfunction.go +++ b/typedapi/types/decayfunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -27,5 +27,9 @@ package types // NumericDecayFunction // GeoDecayFunction // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L202-L211 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L215-L224 type DecayFunction any + +type DecayFunctionVariant interface { + DecayFunctionCaster() *DecayFunction +} diff --git a/typedapi/types/decayfunctionbasedatemathduration.go b/typedapi/types/decayfunctionbasedatemathduration.go deleted file mode 100644 index 232b2f0a5c..0000000000 --- a/typedapi/types/decayfunctionbasedatemathduration.go +++ /dev/null @@ -1,76 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "encoding/json" - "fmt" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" -) - -// DecayFunctionBaseDateMathDuration type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L178-L189 -type DecayFunctionBaseDateMathDuration struct { - DecayFunctionBaseDateMathDuration map[string]DecayPlacementDateMathDuration `json:"-"` - // MultiValueMode Determines how the distance is calculated when a field used for computing the - // decay contains multiple values. - MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"` -} - -// MarhsalJSON overrides marshalling for types with additional properties -func (s DecayFunctionBaseDateMathDuration) MarshalJSON() ([]byte, error) { - type opt DecayFunctionBaseDateMathDuration - // We transform the struct to a map without the embedded additional properties map - tmp := make(map[string]any, 0) - - data, err := json.Marshal(opt(s)) - if err != nil { - return nil, err - } - err = json.Unmarshal(data, &tmp) - if err != nil { - return nil, err - } - - // We inline the additional fields from the underlying map - for key, value := range s.DecayFunctionBaseDateMathDuration { - tmp[fmt.Sprintf("%s", key)] = value - } - delete(tmp, "DecayFunctionBaseDateMathDuration") - - data, err = json.Marshal(tmp) - if err != nil { - return nil, err - } - - return data, nil -} - -// NewDecayFunctionBaseDateMathDuration returns a DecayFunctionBaseDateMathDuration. 
-func NewDecayFunctionBaseDateMathDuration() *DecayFunctionBaseDateMathDuration { - r := &DecayFunctionBaseDateMathDuration{ - DecayFunctionBaseDateMathDuration: make(map[string]DecayPlacementDateMathDuration, 0), - } - - return r -} diff --git a/typedapi/types/decayfunctionbasedoubledouble.go b/typedapi/types/decayfunctionbasedoubledouble.go deleted file mode 100644 index 41ce83d337..0000000000 --- a/typedapi/types/decayfunctionbasedoubledouble.go +++ /dev/null @@ -1,76 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "encoding/json" - "fmt" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" -) - -// DecayFunctionBasedoubledouble type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L178-L189 -type DecayFunctionBasedoubledouble struct { - DecayFunctionBasedoubledouble map[string]DecayPlacementdoubledouble `json:"-"` - // MultiValueMode Determines how the distance is calculated when a field used for computing the - // decay contains multiple values. - MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"` -} - -// MarhsalJSON overrides marshalling for types with additional properties -func (s DecayFunctionBasedoubledouble) MarshalJSON() ([]byte, error) { - type opt DecayFunctionBasedoubledouble - // We transform the struct to a map without the embedded additional properties map - tmp := make(map[string]any, 0) - - data, err := json.Marshal(opt(s)) - if err != nil { - return nil, err - } - err = json.Unmarshal(data, &tmp) - if err != nil { - return nil, err - } - - // We inline the additional fields from the underlying map - for key, value := range s.DecayFunctionBasedoubledouble { - tmp[fmt.Sprintf("%s", key)] = value - } - delete(tmp, "DecayFunctionBasedoubledouble") - - data, err = json.Marshal(tmp) - if err != nil { - return nil, err - } - - return data, nil -} - -// NewDecayFunctionBasedoubledouble returns a DecayFunctionBasedoubledouble. -func NewDecayFunctionBasedoubledouble() *DecayFunctionBasedoubledouble { - r := &DecayFunctionBasedoubledouble{ - DecayFunctionBasedoubledouble: make(map[string]DecayPlacementdoubledouble, 0), - } - - return r -} diff --git a/typedapi/types/decayfunctionbasegeolocationdistance.go b/typedapi/types/decayfunctionbasegeolocationdistance.go deleted file mode 100644 index f7fd6510bd..0000000000 --- a/typedapi/types/decayfunctionbasegeolocationdistance.go +++ /dev/null @@ -1,76 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "encoding/json" - "fmt" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" -) - -// DecayFunctionBaseGeoLocationDistance type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L178-L189 -type DecayFunctionBaseGeoLocationDistance struct { - DecayFunctionBaseGeoLocationDistance map[string]DecayPlacementGeoLocationDistance `json:"-"` - // MultiValueMode Determines how the distance is calculated when a field used for computing the - // decay contains multiple values. 
- MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"` -} - -// MarhsalJSON overrides marshalling for types with additional properties -func (s DecayFunctionBaseGeoLocationDistance) MarshalJSON() ([]byte, error) { - type opt DecayFunctionBaseGeoLocationDistance - // We transform the struct to a map without the embedded additional properties map - tmp := make(map[string]any, 0) - - data, err := json.Marshal(opt(s)) - if err != nil { - return nil, err - } - err = json.Unmarshal(data, &tmp) - if err != nil { - return nil, err - } - - // We inline the additional fields from the underlying map - for key, value := range s.DecayFunctionBaseGeoLocationDistance { - tmp[fmt.Sprintf("%s", key)] = value - } - delete(tmp, "DecayFunctionBaseGeoLocationDistance") - - data, err = json.Marshal(tmp) - if err != nil { - return nil, err - } - - return data, nil -} - -// NewDecayFunctionBaseGeoLocationDistance returns a DecayFunctionBaseGeoLocationDistance. -func NewDecayFunctionBaseGeoLocationDistance() *DecayFunctionBaseGeoLocationDistance { - r := &DecayFunctionBaseGeoLocationDistance{ - DecayFunctionBaseGeoLocationDistance: make(map[string]DecayPlacementGeoLocationDistance, 0), - } - - return r -} diff --git a/typedapi/types/decayplacement.go b/typedapi/types/decayplacement.go index f2b71aa268..522b5da388 100644 --- a/typedapi/types/decayplacement.go +++ b/typedapi/types/decayplacement.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DecayPlacement type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L157-L176 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L170-L189 type DecayPlacement struct { // Decay Defines how documents are scored at the distance given at scale. Decay *Float64 `json:"decay,omitempty"` @@ -103,3 +103,13 @@ func NewDecayPlacement() *DecayPlacement { return r } + +// true + +type DecayPlacementVariant interface { + DecayPlacementCaster() *DecayPlacement +} + +func (s *DecayPlacement) DecayPlacementCaster() *DecayPlacement { + return s +} diff --git a/typedapi/types/decayplacementdatemathduration.go b/typedapi/types/decayplacementdatemathduration.go index 8555d994eb..2ea279aa9c 100644 --- a/typedapi/types/decayplacementdatemathduration.go +++ b/typedapi/types/decayplacementdatemathduration.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DecayPlacementDateMathDuration type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L157-L176 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L170-L189 type DecayPlacementDateMathDuration struct { // Decay Defines how documents are scored at the distance given at scale. 
Decay *Float64 `json:"decay,omitempty"` @@ -103,3 +103,13 @@ func NewDecayPlacementDateMathDuration() *DecayPlacementDateMathDuration { return r } + +// true + +type DecayPlacementDateMathDurationVariant interface { + DecayPlacementDateMathDurationCaster() *DecayPlacementDateMathDuration +} + +func (s *DecayPlacementDateMathDuration) DecayPlacementDateMathDurationCaster() *DecayPlacementDateMathDuration { + return s +} diff --git a/typedapi/types/decayplacementdoubledouble.go b/typedapi/types/decayplacementdoubledouble.go index 9400cb0894..ba33b6b7aa 100644 --- a/typedapi/types/decayplacementdoubledouble.go +++ b/typedapi/types/decayplacementdoubledouble.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DecayPlacementdoubledouble type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L157-L176 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L170-L189 type DecayPlacementdoubledouble struct { // Decay Defines how documents are scored at the distance given at scale. 
Decay *Float64 `json:"decay,omitempty"` @@ -136,3 +136,13 @@ func NewDecayPlacementdoubledouble() *DecayPlacementdoubledouble { return r } + +// true + +type DecayPlacementdoubledoubleVariant interface { + DecayPlacementdoubledoubleCaster() *DecayPlacementdoubledouble +} + +func (s *DecayPlacementdoubledouble) DecayPlacementdoubledoubleCaster() *DecayPlacementdoubledouble { + return s +} diff --git a/typedapi/types/decayplacementgeolocationdistance.go b/typedapi/types/decayplacementgeolocationdistance.go index 396cbb7790..fdca67e8e5 100644 --- a/typedapi/types/decayplacementgeolocationdistance.go +++ b/typedapi/types/decayplacementgeolocationdistance.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DecayPlacementGeoLocationDistance type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L157-L176 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L170-L189 type DecayPlacementGeoLocationDistance struct { // Decay Defines how documents are scored at the distance given at scale. 
Decay *Float64 `json:"decay,omitempty"` @@ -143,3 +143,13 @@ func NewDecayPlacementGeoLocationDistance() *DecayPlacementGeoLocationDistance { return r } + +// true + +type DecayPlacementGeoLocationDistanceVariant interface { + DecayPlacementGeoLocationDistanceCaster() *DecayPlacementGeoLocationDistance +} + +func (s *DecayPlacementGeoLocationDistance) DecayPlacementGeoLocationDistanceCaster() *DecayPlacementGeoLocationDistance { + return s +} diff --git a/typedapi/types/defaults.go b/typedapi/types/defaults.go index 190a1267c6..2d9ba0fec4 100644 --- a/typedapi/types/defaults.go +++ b/typedapi/types/defaults.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Defaults type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/info/types.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/info/types.ts#L24-L27 type Defaults struct { AnomalyDetectors AnomalyDetectors `json:"anomaly_detectors"` Datafeeds Datafeeds `json:"datafeeds"` @@ -34,3 +34,5 @@ func NewDefaults() *Defaults { return r } + +// false diff --git a/typedapi/types/definition.go b/typedapi/types/definition.go index 436e190cc9..848715f9fa 100644 --- a/typedapi/types/definition.go +++ b/typedapi/types/definition.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Definition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/types.ts#L24-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/types.ts#L24-L29 type Definition struct { // Preprocessors Collection of preprocessors Preprocessors []Preprocessor `json:"preprocessors,omitempty"` @@ -36,3 +36,13 @@ func NewDefinition() *Definition { return r } + +// true + +type DefinitionVariant interface { + DefinitionCaster() *Definition +} + +func (s *Definition) DefinitionCaster() *Definition { + return s +} diff --git a/typedapi/types/delayeddatacheckconfig.go b/typedapi/types/delayeddatacheckconfig.go index 222f735fde..08e7c8d6d8 100644 --- a/typedapi/types/delayeddatacheckconfig.go +++ b/typedapi/types/delayeddatacheckconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DelayedDataCheckConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Datafeed.ts#L118-L129 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Datafeed.ts#L122-L133 type DelayedDataCheckConfig struct { // CheckWindow The window of time that is searched for late data. This window of time ends // with the latest finalized bucket. 
@@ -89,3 +89,13 @@ func NewDelayedDataCheckConfig() *DelayedDataCheckConfig { return r } + +// true + +type DelayedDataCheckConfigVariant interface { + DelayedDataCheckConfigCaster() *DelayedDataCheckConfig +} + +func (s *DelayedDataCheckConfig) DelayedDataCheckConfigCaster() *DelayedDataCheckConfig { + return s +} diff --git a/typedapi/types/deleteaction.go b/typedapi/types/deleteaction.go index cd5e2b2c9d..a64bb25e60 100644 --- a/typedapi/types/deleteaction.go +++ b/typedapi/types/deleteaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DeleteAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Phase.ts#L152-L154 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Phase.ts#L149-L151 type DeleteAction struct { DeleteSearchableSnapshot *bool `json:"delete_searchable_snapshot,omitempty"` } @@ -76,3 +76,13 @@ func NewDeleteAction() *DeleteAction { return r } + +// true + +type DeleteActionVariant interface { + DeleteActionCaster() *DeleteAction +} + +func (s *DeleteAction) DeleteActionCaster() *DeleteAction { + return s +} diff --git a/typedapi/types/deleteoperation.go b/typedapi/types/deleteoperation.go index 7faa9c7940..f21584c9af 100644 --- a/typedapi/types/deleteoperation.go +++ b/typedapi/types/deleteoperation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,15 +33,15 @@ import ( // DeleteOperation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/bulk/types.ts#L134-L134 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/bulk/types.ts#L144-L144 type DeleteOperation struct { // Id_ The document ID. Id_ *string `json:"_id,omitempty"` IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` IfSeqNo *int64 `json:"if_seq_no,omitempty"` - // Index_ Name of the index or index alias to perform the action on. + // Index_ The name of the index or index alias to perform the action on. Index_ *string `json:"_index,omitempty"` - // Routing Custom value used to route operations to a specific shard. + // Routing A custom value used to route operations to a specific shard. Routing *string `json:"routing,omitempty"` Version *int64 `json:"version,omitempty"` VersionType *versiontype.VersionType `json:"version_type,omitempty"` @@ -118,3 +118,13 @@ func NewDeleteOperation() *DeleteOperation { return r } + +// true + +type DeleteOperationVariant interface { + DeleteOperationCaster() *DeleteOperation +} + +func (s *DeleteOperation) DeleteOperationCaster() *DeleteOperation { + return s +} diff --git a/typedapi/types/delimitedpayloadtokenfilter.go b/typedapi/types/delimitedpayloadtokenfilter.go index 716f94cc2c..4b01f3a759 100644 --- a/typedapi/types/delimitedpayloadtokenfilter.go +++ b/typedapi/types/delimitedpayloadtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // DelimitedPayloadTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L68-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L67-L71 type DelimitedPayloadTokenFilter struct { Delimiter *string `json:"delimiter,omitempty"` Encoding *delimitedpayloadencoding.DelimitedPayloadEncoding `json:"encoding,omitempty"` @@ -109,3 +109,13 @@ func NewDelimitedPayloadTokenFilter() *DelimitedPayloadTokenFilter { return r } + +// true + +type DelimitedPayloadTokenFilterVariant interface { + DelimitedPayloadTokenFilterCaster() *DelimitedPayloadTokenFilter +} + +func (s *DelimitedPayloadTokenFilter) DelimitedPayloadTokenFilterCaster() *DelimitedPayloadTokenFilter { + return s +} diff --git a/typedapi/types/densevectorindexoptions.go b/typedapi/types/densevectorindexoptions.go index 22691d4f1b..cb89837b35 100644 --- a/typedapi/types/densevectorindexoptions.go +++ b/typedapi/types/densevectorindexoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -27,16 +27,43 @@ import ( "fmt" "io" "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/densevectorindexoptionstype" ) // DenseVectorIndexOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/DenseVectorIndexOptions.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/DenseVectorProperty.ts#L129-L162 type DenseVectorIndexOptions struct { + // ConfidenceInterval The confidence interval to use when quantizing the vectors. Can be any value + // between and including `0.90` and + // `1.0` or exactly `0`. When the value is `0`, this indicates that dynamic + // quantiles should be calculated for + // optimized quantization. When between `0.90` and `1.0`, this value restricts + // the values used when calculating + // the quantization thresholds. + // + // For example, a value of `0.95` will only use the middle `95%` of the values + // when calculating the quantization + // thresholds (e.g. the highest and lowest `2.5%` of values will be ignored). + // + // Defaults to `1/(dims + 1)` for `int8` quantized vectors and `0` for `int4` + // for dynamic quantile calculation. + // + // Only applicable to `int8_hnsw`, `int4_hnsw`, `int8_flat`, and `int4_flat` + // index types. ConfidenceInterval *float32 `json:"confidence_interval,omitempty"` - EfConstruction *int `json:"ef_construction,omitempty"` - M *int `json:"m,omitempty"` - Type string `json:"type"` + // EfConstruction The number of candidates to track while assembling the list of nearest + // neighbors for each new node. + // + // Only applicable to `hnsw`, `int8_hnsw`, and `int4_hnsw` index types. + EfConstruction *int `json:"ef_construction,omitempty"` + // M The number of neighbors each node will be connected to in the HNSW graph. + // + // Only applicable to `hnsw`, `int8_hnsw`, and `int4_hnsw` index types. + M *int `json:"m,omitempty"` + // Type The type of kNN algorithm to use. 
+ Type densevectorindexoptionstype.DenseVectorIndexOptionsType `json:"type"` } func (s *DenseVectorIndexOptions) UnmarshalJSON(data []byte) error { @@ -103,16 +130,9 @@ func (s *DenseVectorIndexOptions) UnmarshalJSON(data []byte) error { } case "type": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Type = o } } @@ -125,3 +145,13 @@ func NewDenseVectorIndexOptions() *DenseVectorIndexOptions { return r } + +// true + +type DenseVectorIndexOptionsVariant interface { + DenseVectorIndexOptionsCaster() *DenseVectorIndexOptions +} + +func (s *DenseVectorIndexOptions) DenseVectorIndexOptionsCaster() *DenseVectorIndexOptions { + return s +} diff --git a/typedapi/types/densevectorproperty.go b/typedapi/types/densevectorproperty.go index 6eaa552bcf..3913852717 100644 --- a/typedapi/types/densevectorproperty.go +++ b/typedapi/types/densevectorproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,25 +28,56 @@ import ( "io" "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/densevectorelementtype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/densevectorsimilarity" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // DenseVectorProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/complex.ts#L52-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/DenseVectorProperty.ts#L23-L62 type DenseVectorProperty struct { - Dims *int `json:"dims,omitempty"` - Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` - ElementType *string `json:"element_type,omitempty"` - Fields map[string]Property `json:"fields,omitempty"` - IgnoreAbove *int `json:"ignore_above,omitempty"` - Index *bool `json:"index,omitempty"` - IndexOptions *DenseVectorIndexOptions `json:"index_options,omitempty"` + // Dims Number of vector dimensions. Can't exceed `4096`. If `dims` is not specified, + // it will be set to the length of + // the first vector added to the field. + Dims *int `json:"dims,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + // ElementType The data type used to encode vectors. The supported data types are `float` + // (default), `byte`, and `bit`. + ElementType *densevectorelementtype.DenseVectorElementType `json:"element_type,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Index If `true`, you can search this field using the kNN search API. + Index *bool `json:"index,omitempty"` + // IndexOptions An optional section that configures the kNN indexing algorithm. The HNSW + // algorithm has two internal parameters + // that influence how the data structure is built. These can be adjusted to + // improve the accuracy of results, at the + // expense of slower indexing speed. + // + // This parameter can only be specified when `index` is `true`. + IndexOptions *DenseVectorIndexOptions `json:"index_options,omitempty"` // Meta Metadata about the field. 
Meta map[string]string `json:"meta,omitempty"` Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Type string `json:"type,omitempty"` + // Similarity The vector similarity metric to use in kNN search. + // + // Documents are ranked by their vector field's similarity to the query vector. + // The `_score` of each document will + // be derived from the similarity, in a way that ensures scores are positive and + // that a larger score corresponds + // to a higher ranking. + // + // Defaults to `l2_norm` when `element_type` is `bit` otherwise defaults to + // `cosine`. + // + // `bit` vectors only support `l2_norm` as their similarity metric. + // + // This parameter can only be specified when `index` is `true`. + Similarity *densevectorsimilarity.DenseVectorSimilarity `json:"similarity,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { @@ -86,16 +117,9 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { } case "element_type": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.ElementType); err != nil { return fmt.Errorf("%s | %w", "ElementType", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.ElementType = &o case "fields": if s.Fields == nil { @@ -116,301 +140,313 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo 
case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := 
NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -478,317 +514,327 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := 
NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil 
{ - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.Similarity); err != nil { return fmt.Errorf("%s | %w", "Similarity", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", 
"SyntheticSourceKeep", err) } - s.Similarity = &o case "type": if err := dec.Decode(&s.Type); err != nil { @@ -804,17 +850,18 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { func (s DenseVectorProperty) MarshalJSON() ([]byte, error) { type innerDenseVectorProperty DenseVectorProperty tmp := innerDenseVectorProperty{ - Dims: s.Dims, - Dynamic: s.Dynamic, - ElementType: s.ElementType, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - IndexOptions: s.IndexOptions, - Meta: s.Meta, - Properties: s.Properties, - Similarity: s.Similarity, - Type: s.Type, + Dims: s.Dims, + Dynamic: s.Dynamic, + ElementType: s.ElementType, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + IndexOptions: s.IndexOptions, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "dense_vector" @@ -825,10 +872,20 @@ func (s DenseVectorProperty) MarshalJSON() ([]byte, error) { // NewDenseVectorProperty returns a DenseVectorProperty. func NewDenseVectorProperty() *DenseVectorProperty { r := &DenseVectorProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DenseVectorPropertyVariant interface { + DenseVectorPropertyCaster() *DenseVectorProperty +} + +func (s *DenseVectorProperty) DenseVectorPropertyCaster() *DenseVectorProperty { + return s +} diff --git a/typedapi/types/dependency.go b/typedapi/types/dependency.go index d39e321192..4a8483aca1 100644 --- a/typedapi/types/dependency.go +++ b/typedapi/types/dependency.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Dependency type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L30-L33 type Dependency struct { Field string `json:"field"` Value ScalarValue `json:"value"` @@ -80,3 +80,13 @@ func NewDependency() *Dependency { return r } + +// true + +type DependencyVariant interface { + DependencyCaster() *Dependency +} + +func (s *Dependency) DependencyCaster() *Dependency { + return s +} diff --git a/typedapi/types/deprecation.go b/typedapi/types/deprecation.go index 2dc6c51dc7..d1d76ab6fa 100644 --- a/typedapi/types/deprecation.go +++ b/typedapi/types/deprecation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,13 +33,19 @@ import ( // Deprecation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/migration/deprecations/types.ts#L29-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/migration/deprecations/types.ts#L32-L47 type Deprecation struct { - Details string `json:"details"` + // Details Optional details about the deprecation warning. 
+ Details *string `json:"details,omitempty"` // Level The level property describes the significance of the issue. - Level deprecationlevel.DeprecationLevel `json:"level"` - Message string `json:"message"` - Url string `json:"url"` + Level deprecationlevel.DeprecationLevel `json:"level"` + // Message Descriptive information about the deprecation warning. + Message string `json:"message"` + Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` + ResolveDuringRollingUpgrade bool `json:"resolve_during_rolling_upgrade"` + // Url A link to the breaking change documentation, where you can find more + // information about this change. + Url string `json:"url"` } func (s *Deprecation) UnmarshalJSON(data []byte) error { @@ -67,7 +73,7 @@ func (s *Deprecation) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Details = o + s.Details = &o case "level": if err := dec.Decode(&s.Level); err != nil { @@ -86,6 +92,28 @@ func (s *Deprecation) UnmarshalJSON(data []byte) error { } s.Message = o + case "_meta": + if s.Meta_ == nil { + s.Meta_ = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "resolve_during_rolling_upgrade": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ResolveDuringRollingUpgrade", err) + } + s.ResolveDuringRollingUpgrade = value + case bool: + s.ResolveDuringRollingUpgrade = v + } + case "url": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -105,7 +133,11 @@ func (s *Deprecation) UnmarshalJSON(data []byte) error { // NewDeprecation returns a Deprecation. 
func NewDeprecation() *Deprecation { - r := &Deprecation{} + r := &Deprecation{ + Meta_: make(map[string]json.RawMessage), + } return r } + +// false diff --git a/typedapi/types/deprecationindexing.go b/typedapi/types/deprecationindexing.go index 719a5c807f..6e791dc874 100644 --- a/typedapi/types/deprecationindexing.go +++ b/typedapi/types/deprecationindexing.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DeprecationIndexing type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L144-L146 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L144-L146 type DeprecationIndexing struct { Enabled string `json:"enabled"` } @@ -74,3 +74,5 @@ func NewDeprecationIndexing() *DeprecationIndexing { return r } + +// false diff --git a/typedapi/types/derivativeaggregate.go b/typedapi/types/derivativeaggregate.go index 05461583c9..42e483c375 100644 --- a/typedapi/types/derivativeaggregate.go +++ b/typedapi/types/derivativeaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DerivativeAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L227-L231 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L241-L248 type DerivativeAggregate struct { Meta Metadata `json:"meta,omitempty"` NormalizedValue *Float64 `json:"normalized_value,omitempty"` @@ -119,3 +119,5 @@ func NewDerivativeAggregate() *DerivativeAggregate { return r } + +// false diff --git a/typedapi/types/derivativeaggregation.go b/typedapi/types/derivativeaggregation.go index 1c1598fd7c..74fcdc0307 100644 --- a/typedapi/types/derivativeaggregation.go +++ b/typedapi/types/derivativeaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // DerivativeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L196-L196 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L216-L216 type DerivativeAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewDerivativeAggregation() *DerivativeAggregation { return r } + +// true + +type DerivativeAggregationVariant interface { + DerivativeAggregationCaster() *DerivativeAggregation +} + +func (s *DerivativeAggregation) DerivativeAggregationCaster() *DerivativeAggregation { + return s +} diff --git a/typedapi/types/detailsinfo.go b/typedapi/types/detailsinfo.go new file mode 100644 index 0000000000..29411f0b1c --- /dev/null +++ b/typedapi/types/detailsinfo.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DetailsInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L286-L321 +type DetailsInfo struct { + // Blob A description of the blob that was written and read. 
+ Blob BlobDetails `json:"blob"` + // OverwriteElapsed The elapsed time spent overwriting the blob. + // If the blob was not overwritten, this information is omitted. + OverwriteElapsed Duration `json:"overwrite_elapsed,omitempty"` + // OverwriteElapsedNanos The elapsed time spent overwriting the blob, in nanoseconds. + // If the blob was not overwritten, this information is omitted. + OverwriteElapsedNanos *int64 `json:"overwrite_elapsed_nanos,omitempty"` + // WriteElapsed The elapsed time spent writing the blob. + WriteElapsed Duration `json:"write_elapsed"` + // WriteElapsedNanos The elapsed time spent writing the blob, in nanoseconds. + WriteElapsedNanos int64 `json:"write_elapsed_nanos"` + // WriteThrottled The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or + // `indices.recovery.max_bytes_per_sec` if the recovery settings for managed + // services are set) throttle while writing the blob. + WriteThrottled Duration `json:"write_throttled"` + // WriteThrottledNanos The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or + // `indices.recovery.max_bytes_per_sec` if the recovery settings for managed + // services are set) throttle while writing the blob, in nanoseconds. + WriteThrottledNanos int64 `json:"write_throttled_nanos"` + // WriterNode The node which wrote the blob and coordinated the read operations. 
+ WriterNode SnapshotNodeInfo `json:"writer_node"` +} + +func (s *DetailsInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "blob": + if err := dec.Decode(&s.Blob); err != nil { + return fmt.Errorf("%s | %w", "Blob", err) + } + + case "overwrite_elapsed": + if err := dec.Decode(&s.OverwriteElapsed); err != nil { + return fmt.Errorf("%s | %w", "OverwriteElapsed", err) + } + + case "overwrite_elapsed_nanos": + if err := dec.Decode(&s.OverwriteElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "OverwriteElapsedNanos", err) + } + + case "write_elapsed": + if err := dec.Decode(&s.WriteElapsed); err != nil { + return fmt.Errorf("%s | %w", "WriteElapsed", err) + } + + case "write_elapsed_nanos": + if err := dec.Decode(&s.WriteElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "WriteElapsedNanos", err) + } + + case "write_throttled": + if err := dec.Decode(&s.WriteThrottled); err != nil { + return fmt.Errorf("%s | %w", "WriteThrottled", err) + } + + case "write_throttled_nanos": + if err := dec.Decode(&s.WriteThrottledNanos); err != nil { + return fmt.Errorf("%s | %w", "WriteThrottledNanos", err) + } + + case "writer_node": + if err := dec.Decode(&s.WriterNode); err != nil { + return fmt.Errorf("%s | %w", "WriterNode", err) + } + + } + } + return nil +} + +// NewDetailsInfo returns a DetailsInfo. +func NewDetailsInfo() *DetailsInfo { + r := &DetailsInfo{} + + return r +} + +// false diff --git a/typedapi/types/detectionrule.go b/typedapi/types/detectionrule.go index c35521860c..6c7f7f2e42 100644 --- a/typedapi/types/detectionrule.go +++ b/typedapi/types/detectionrule.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // DetectionRule type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Rule.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Rule.ts#L25-L39 type DetectionRule struct { // Actions The set of actions to be triggered when the rule applies. If more than one // action is specified the effects of all actions are combined. @@ -45,8 +45,18 @@ type DetectionRule struct { // NewDetectionRule returns a DetectionRule. func NewDetectionRule() *DetectionRule { r := &DetectionRule{ - Scope: make(map[string]FilterRef, 0), + Scope: make(map[string]FilterRef), } return r } + +// true + +type DetectionRuleVariant interface { + DetectionRuleCaster() *DetectionRule +} + +func (s *DetectionRule) DetectionRuleCaster() *DetectionRule { + return s +} diff --git a/typedapi/types/detector.go b/typedapi/types/detector.go index 8a74a92dfb..5a95f8190e 100644 --- a/typedapi/types/detector.go +++ b/typedapi/types/detector.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // Detector type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Detector.ts#L25-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Detector.ts#L25-L67 type Detector struct { // ByFieldName The field used to split the data. In particular, this property is used for // analyzing the splits with respect to their own history. It is used for @@ -183,3 +183,13 @@ func NewDetector() *Detector { return r } + +// true + +type DetectorVariant interface { + DetectorCaster() *Detector +} + +func (s *Detector) DetectorCaster() *Detector { + return s +} diff --git a/typedapi/types/detectorread.go b/typedapi/types/detectorread.go index 71dee15e36..32c89bf388 100644 --- a/typedapi/types/detectorread.go +++ b/typedapi/types/detectorread.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // DetectorRead type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Detector.ts#L69-L125 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Detector.ts#L69-L125 type DetectorRead struct { // ByFieldName The field used to split the data. 
// In particular, this property is used for analyzing the splits with respect to @@ -190,3 +190,5 @@ func NewDetectorRead() *DetectorRead { return r } + +// false diff --git a/typedapi/types/detectorupdate.go b/typedapi/types/detectorupdate.go new file mode 100644 index 0000000000..19a204dbf4 --- /dev/null +++ b/typedapi/types/detectorupdate.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DetectorUpdate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Detector.ts#L127-L143 +type DetectorUpdate struct { + // CustomRules An array of custom rule objects, which enable you to customize the way + // detectors operate. + // For example, a rule may dictate to the detector conditions under which + // results should be skipped. + // Kibana refers to custom rules as job rules. 
+ CustomRules []DetectionRule `json:"custom_rules,omitempty"` + // Description A description of the detector. + Description *string `json:"description,omitempty"` + // DetectorIndex A unique identifier for the detector. + // This identifier is based on the order of the detectors in the + // `analysis_config`, starting at zero. + DetectorIndex int `json:"detector_index"` +} + +func (s *DetectorUpdate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "custom_rules": + if err := dec.Decode(&s.CustomRules); err != nil { + return fmt.Errorf("%s | %w", "CustomRules", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "detector_index": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DetectorIndex", err) + } + s.DetectorIndex = value + case float64: + f := int(v) + s.DetectorIndex = f + } + + } + } + return nil +} + +// NewDetectorUpdate returns a DetectorUpdate. +func NewDetectorUpdate() *DetectorUpdate { + r := &DetectorUpdate{} + + return r +} + +// true + +type DetectorUpdateVariant interface { + DetectorUpdateCaster() *DetectorUpdate +} + +func (s *DetectorUpdate) DetectorUpdateCaster() *DetectorUpdate { + return s +} diff --git a/typedapi/types/dfsknnprofile.go b/typedapi/types/dfsknnprofile.go new file mode 100644 index 0000000000..c16b0fc128 --- /dev/null +++ b/typedapi/types/dfsknnprofile.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DfsKnnProfile type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L181-L186 +type DfsKnnProfile struct { + Collector []KnnCollectorResult `json:"collector"` + Query []KnnQueryProfileResult `json:"query"` + RewriteTime int64 `json:"rewrite_time"` + VectorOperationsCount *int64 `json:"vector_operations_count,omitempty"` +} + +func (s *DfsKnnProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collector": + if err := dec.Decode(&s.Collector); err != nil { + return fmt.Errorf("%s | %w", "Collector", err) + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "rewrite_time": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RewriteTime", err) + } + s.RewriteTime = value + case float64: + f := int64(v) + s.RewriteTime = f + } + + case "vector_operations_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "VectorOperationsCount", err) + } + s.VectorOperationsCount = &value + case float64: + f := int64(v) + s.VectorOperationsCount = &f + } + + } + } + return nil +} + +// NewDfsKnnProfile returns a DfsKnnProfile. +func NewDfsKnnProfile() *DfsKnnProfile { + r := &DfsKnnProfile{} + + return r +} + +// false diff --git a/typedapi/types/dfsprofile.go b/typedapi/types/dfsprofile.go new file mode 100644 index 0000000000..ff1d72fb15 --- /dev/null +++ b/typedapi/types/dfsprofile.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +// DfsProfile type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L154-L157 +type DfsProfile struct { + Knn []DfsKnnProfile `json:"knn,omitempty"` + Statistics *DfsStatisticsProfile `json:"statistics,omitempty"` +} + +// NewDfsProfile returns a DfsProfile. +func NewDfsProfile() *DfsProfile { + r := &DfsProfile{} + + return r +} + +// false diff --git a/typedapi/types/dfsstatisticsbreakdown.go b/typedapi/types/dfsstatisticsbreakdown.go new file mode 100644 index 0000000000..d8b8c51c33 --- /dev/null +++ b/typedapi/types/dfsstatisticsbreakdown.go @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DfsStatisticsBreakdown type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L170-L179 +type DfsStatisticsBreakdown struct { + CollectionStatistics int64 `json:"collection_statistics"` + CollectionStatisticsCount int64 `json:"collection_statistics_count"` + CreateWeight int64 `json:"create_weight"` + CreateWeightCount int64 `json:"create_weight_count"` + Rewrite int64 `json:"rewrite"` + RewriteCount int64 `json:"rewrite_count"` + TermStatistics int64 `json:"term_statistics"` + TermStatisticsCount int64 `json:"term_statistics_count"` +} + +func (s *DfsStatisticsBreakdown) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collection_statistics": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CollectionStatistics", err) + } + s.CollectionStatistics = value + case float64: + f := int64(v) + s.CollectionStatistics = f + } + + case "collection_statistics_count": + var tmp any + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CollectionStatisticsCount", err) + } + s.CollectionStatisticsCount = value + case float64: + f := int64(v) + s.CollectionStatisticsCount = f + } + + case "create_weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CreateWeight", err) + } + s.CreateWeight = value + case float64: + f := int64(v) + s.CreateWeight = f + } + + case "create_weight_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CreateWeightCount", err) + } + s.CreateWeightCount = value + case float64: + f := int64(v) + s.CreateWeightCount = f + } + + case "rewrite": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Rewrite", err) + } + s.Rewrite = value + case float64: + f := int64(v) + s.Rewrite = f + } + + case "rewrite_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RewriteCount", err) + } + s.RewriteCount = value + case float64: + f := int64(v) + s.RewriteCount = f + } + + case "term_statistics": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TermStatistics", err) + } + s.TermStatistics = value + case float64: + f := int64(v) + s.TermStatistics = f + } + + case "term_statistics_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return 
fmt.Errorf("%s | %w", "TermStatisticsCount", err) + } + s.TermStatisticsCount = value + case float64: + f := int64(v) + s.TermStatisticsCount = f + } + + } + } + return nil +} + +// NewDfsStatisticsBreakdown returns a DfsStatisticsBreakdown. +func NewDfsStatisticsBreakdown() *DfsStatisticsBreakdown { + r := &DfsStatisticsBreakdown{} + + return r +} + +// false diff --git a/typedapi/types/dfsstatisticsprofile.go b/typedapi/types/dfsstatisticsprofile.go new file mode 100644 index 0000000000..e7c8469ef1 --- /dev/null +++ b/typedapi/types/dfsstatisticsprofile.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DfsStatisticsProfile type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L159-L167 +type DfsStatisticsProfile struct { + Breakdown DfsStatisticsBreakdown `json:"breakdown"` + Children []DfsStatisticsProfile `json:"children,omitempty"` + Debug map[string]json.RawMessage `json:"debug,omitempty"` + Description string `json:"description"` + Time Duration `json:"time,omitempty"` + TimeInNanos int64 `json:"time_in_nanos"` + Type string `json:"type"` +} + +func (s *DfsStatisticsProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "breakdown": + if err := dec.Decode(&s.Breakdown); err != nil { + return fmt.Errorf("%s | %w", "Breakdown", err) + } + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return fmt.Errorf("%s | %w", "Children", err) + } + + case "debug": + if s.Debug == nil { + s.Debug = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Debug); err != nil { + return fmt.Errorf("%s | %w", "Debug", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return fmt.Errorf("%s | %w", "Time", err) + } + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return fmt.Errorf("%s | %w", "TimeInNanos", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// 
NewDfsStatisticsProfile returns a DfsStatisticsProfile. +func NewDfsStatisticsProfile() *DfsStatisticsProfile { + r := &DfsStatisticsProfile{ + Debug: make(map[string]json.RawMessage), + } + + return r +} + +// false diff --git a/typedapi/types/diagnosis.go b/typedapi/types/diagnosis.go index fa28e2b99c..4d5d460b77 100644 --- a/typedapi/types/diagnosis.go +++ b/typedapi/types/diagnosis.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Diagnosis type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L49-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L50-L56 type Diagnosis struct { Action string `json:"action"` AffectedResources DiagnosisAffectedResources `json:"affected_resources"` @@ -119,3 +119,5 @@ func NewDiagnosis() *Diagnosis { return r } + +// false diff --git a/typedapi/types/diagnosisaffectedresources.go b/typedapi/types/diagnosisaffectedresources.go index a45019e10b..c9f62455c7 100644 --- a/typedapi/types/diagnosisaffectedresources.go +++ b/typedapi/types/diagnosisaffectedresources.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DiagnosisAffectedResources type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L57-L63 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L58-L64 type DiagnosisAffectedResources struct { FeatureStates []string `json:"feature_states,omitempty"` Indices []string `json:"indices,omitempty"` @@ -101,3 +101,5 @@ func NewDiagnosisAffectedResources() *DiagnosisAffectedResources { return r } + +// false diff --git a/typedapi/types/dictionarydecompoundertokenfilter.go b/typedapi/types/dictionarydecompoundertokenfilter.go index 6e954dbbfe..9f5df5101f 100644 --- a/typedapi/types/dictionarydecompoundertokenfilter.go +++ b/typedapi/types/dictionarydecompoundertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DictionaryDecompounderTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L54-L56 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L53-L55 type DictionaryDecompounderTokenFilter struct { HyphenationPatternsPath *string `json:"hyphenation_patterns_path,omitempty"` MaxSubwordSize *int `json:"max_subword_size,omitempty"` @@ -191,3 +191,13 @@ func NewDictionaryDecompounderTokenFilter() *DictionaryDecompounderTokenFilter { return r } + +// true + +type DictionaryDecompounderTokenFilterVariant interface { + DictionaryDecompounderTokenFilterCaster() *DictionaryDecompounderTokenFilter +} + +func (s *DictionaryDecompounderTokenFilter) DictionaryDecompounderTokenFilterCaster() *DictionaryDecompounderTokenFilter { + return s +} diff --git a/typedapi/types/directgenerator.go b/typedapi/types/directgenerator.go index c00019aace..fd7aea5b9f 100644 --- a/typedapi/types/directgenerator.go +++ b/typedapi/types/directgenerator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // DirectGenerator type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L268-L331 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L268-L331 type DirectGenerator struct { // Field The field to fetch the candidate suggestions from. // Needs to be set globally or per suggestion. 
@@ -252,3 +252,13 @@ func NewDirectGenerator() *DirectGenerator { return r } + +// true + +type DirectGeneratorVariant interface { + DirectGeneratorCaster() *DirectGenerator +} + +func (s *DirectGenerator) DirectGeneratorCaster() *DirectGenerator { + return s +} diff --git a/typedapi/types/discovery.go b/typedapi/types/discovery.go index 26b736957e..8d8f6cf624 100644 --- a/typedapi/types/discovery.go +++ b/typedapi/types/discovery.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Discovery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L201-L219 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L201-L219 type Discovery struct { ClusterApplierStats *ClusterAppliedStats `json:"cluster_applier_stats,omitempty"` // ClusterStateQueue Contains statistics for the cluster state queue of the node. @@ -43,8 +43,10 @@ type Discovery struct { // NewDiscovery returns a Discovery. func NewDiscovery() *Discovery { r := &Discovery{ - ClusterStateUpdate: make(map[string]ClusterStateUpdate, 0), + ClusterStateUpdate: make(map[string]ClusterStateUpdate), } return r } + +// false diff --git a/typedapi/types/discoverynode.go b/typedapi/types/discoverynode.go index e70456b38d..f54973b14a 100644 --- a/typedapi/types/discoverynode.go +++ b/typedapi/types/discoverynode.go @@ -16,82 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// DiscoveryNode type. +// DiscoveryNode type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DiscoveryNode.ts#L24-L30 -type DiscoveryNode struct { - Attributes map[string]string `json:"attributes"` - EphemeralId string `json:"ephemeral_id"` - Id string `json:"id"` - Name string `json:"name"` - TransportAddress string `json:"transport_address"` -} - -func (s *DiscoveryNode) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "attributes": - if s.Attributes == nil { - s.Attributes = make(map[string]string, 0) - } - if err := dec.Decode(&s.Attributes); err != nil { - return fmt.Errorf("%s | %w", "Attributes", err) - } - - case "ephemeral_id": - if err := dec.Decode(&s.EphemeralId); err != nil { - return fmt.Errorf("%s | %w", "EphemeralId", err) - } - - case "id": - if err := dec.Decode(&s.Id); err != nil { - return fmt.Errorf("%s | %w", "Id", err) - } - - case "name": - if err := dec.Decode(&s.Name); err != nil { - return fmt.Errorf("%s | %w", "Name", err) - } - - case "transport_address": - if err := dec.Decode(&s.TransportAddress); err != nil { - return fmt.Errorf("%s | %w", "TransportAddress", err) - } - - } - } - return nil -} - -// NewDiscoveryNode returns a DiscoveryNode. 
-func NewDiscoveryNode() *DiscoveryNode { - r := &DiscoveryNode{ - Attributes: make(map[string]string, 0), - } - - return r -} +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DiscoveryNode.ts#L25-L25 +type DiscoveryNode map[string]DiscoveryNodeContent diff --git a/typedapi/types/basenode.go b/typedapi/types/discoverynodecompact.go similarity index 57% rename from typedapi/types/basenode.go rename to typedapi/types/discoverynodecompact.go index 5f9f08979a..22f7f423a5 100644 --- a/typedapi/types/basenode.go +++ b/typedapi/types/discoverynodecompact.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,23 +26,20 @@ import ( "errors" "fmt" "io" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole" ) -// BaseNode type. +// DiscoveryNodeCompact type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_spec_utils/BaseNode.ts#L25-L32 -type BaseNode struct { - Attributes map[string]string `json:"attributes"` - Host string `json:"host"` - Ip string `json:"ip"` - Name string `json:"name"` - Roles []noderole.NodeRole `json:"roles,omitempty"` - TransportAddress string `json:"transport_address"` +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DiscoveryNode.ts#L39-L48 +type DiscoveryNodeCompact struct { + Attributes map[string]string `json:"attributes"` + EphemeralId string `json:"ephemeral_id"` + Id string `json:"id"` + Name string `json:"name"` + TransportAddress string `json:"transport_address"` } -func (s *BaseNode) UnmarshalJSON(data []byte) error { +func (s *DiscoveryNodeCompact) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -65,14 +62,14 @@ func (s *BaseNode) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Attributes", err) } - case "host": - if err := dec.Decode(&s.Host); err != nil { - return fmt.Errorf("%s | %w", "Host", err) + case "ephemeral_id": + if err := dec.Decode(&s.EphemeralId); err != nil { + return fmt.Errorf("%s | %w", "EphemeralId", err) } - case "ip": - if err := dec.Decode(&s.Ip); err != nil { - return fmt.Errorf("%s | %w", "Ip", err) + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) } case "name": @@ -80,11 +77,6 @@ func (s *BaseNode) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Name", err) } - case "roles": - if err := dec.Decode(&s.Roles); err != nil { - return fmt.Errorf("%s | %w", "Roles", err) - } - case "transport_address": if err := dec.Decode(&s.TransportAddress); err != nil { return fmt.Errorf("%s | %w", "TransportAddress", err) @@ -95,11 +87,13 @@ func (s *BaseNode) UnmarshalJSON(data []byte) error { return nil } 
-// NewBaseNode returns a BaseNode. -func NewBaseNode() *BaseNode { - r := &BaseNode{ - Attributes: make(map[string]string, 0), +// NewDiscoveryNodeCompact returns a DiscoveryNodeCompact. +func NewDiscoveryNodeCompact() *DiscoveryNodeCompact { + r := &DiscoveryNodeCompact{ + Attributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/discoverynodecontent.go b/typedapi/types/discoverynodecontent.go new file mode 100644 index 0000000000..1fa2303580 --- /dev/null +++ b/typedapi/types/discoverynodecontent.go @@ -0,0 +1,153 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DiscoveryNodeContent type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DiscoveryNode.ts#L27-L37 +type DiscoveryNodeContent struct { + Attributes map[string]string `json:"attributes"` + EphemeralId string `json:"ephemeral_id"` + ExternalId string `json:"external_id"` + MaxIndexVersion int `json:"max_index_version"` + MinIndexVersion int `json:"min_index_version"` + Name *string `json:"name,omitempty"` + Roles []string `json:"roles"` + TransportAddress string `json:"transport_address"` + Version string `json:"version"` +} + +func (s *DiscoveryNodeContent) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "ephemeral_id": + if err := dec.Decode(&s.EphemeralId); err != nil { + return fmt.Errorf("%s | %w", "EphemeralId", err) + } + + case "external_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ExternalId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExternalId = o + + case "max_index_version": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxIndexVersion", err) + } + s.MaxIndexVersion = value + case float64: + f := int(v) + s.MaxIndexVersion = f + } + + case "min_index_version": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinIndexVersion", err) + } + s.MinIndexVersion = value + case float64: + f := 
int(v) + s.MinIndexVersion = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewDiscoveryNodeContent returns a DiscoveryNodeContent. +func NewDiscoveryNodeContent() *DiscoveryNodeContent { + r := &DiscoveryNodeContent{ + Attributes: make(map[string]string), + } + + return r +} + +// false diff --git a/typedapi/types/diskindicator.go b/typedapi/types/diskindicator.go index 09d42aa386..20f26e53a2 100644 --- a/typedapi/types/diskindicator.go +++ b/typedapi/types/diskindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // DiskIndicator type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L122-L126 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L123-L127 type DiskIndicator struct { Details *DiskIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewDiskIndicator() *DiskIndicator { return r } + +// false diff --git a/typedapi/types/diskindicatordetails.go b/typedapi/types/diskindicatordetails.go index d2043aff16..867fefb847 100644 --- a/typedapi/types/diskindicatordetails.go +++ b/typedapi/types/diskindicatordetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DiskIndicatorDetails type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L127-L133 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L128-L134 type DiskIndicatorDetails struct { IndicesWithReadonlyBlock int64 `json:"indices_with_readonly_block"` NodesOverFloodStageWatermark int64 `json:"nodes_over_flood_stage_watermark"` @@ -141,3 +141,5 @@ func NewDiskIndicatorDetails() *DiskIndicatorDetails { return r } + +// false diff --git a/typedapi/types/diskusage.go b/typedapi/types/diskusage.go index c988ad6bfd..a8a05a585c 100644 --- a/typedapi/types/diskusage.go +++ b/typedapi/types/diskusage.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DiskUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/types.ts#L62-L69 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/types.ts#L63-L70 type DiskUsage struct { FreeBytes int64 `json:"free_bytes"` FreeDiskPercent Float64 `json:"free_disk_percent"` @@ -156,3 +156,5 @@ func NewDiskUsage() *DiskUsage { return r } + +// false diff --git a/typedapi/types/dismaxquery.go b/typedapi/types/dismaxquery.go index def2ef8107..268c8b0b11 100644 --- a/typedapi/types/dismaxquery.go +++ b/typedapi/types/dismaxquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DisMaxQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L79-L91 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L88-L103 type DisMaxQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -125,3 +125,13 @@ func NewDisMaxQuery() *DisMaxQuery { return r } + +// true + +type DisMaxQueryVariant interface { + DisMaxQueryCaster() *DisMaxQuery +} + +func (s *DisMaxQuery) DisMaxQueryCaster() *DisMaxQuery { + return s +} diff --git a/typedapi/types/dissectprocessor.go b/typedapi/types/dissectprocessor.go index 0fa9f1de84..de68c10b2d 100644 --- a/typedapi/types/dissectprocessor.go +++ b/typedapi/types/dissectprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DissectProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L571-L590 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L804-L823 type DissectProcessor struct { // AppendSeparator The character(s) that separate the appended fields. AppendSeparator *string `json:"append_separator,omitempty"` @@ -180,3 +180,13 @@ func NewDissectProcessor() *DissectProcessor { return r } + +// true + +type DissectProcessorVariant interface { + DissectProcessorCaster() *DissectProcessor +} + +func (s *DissectProcessor) DissectProcessorCaster() *DissectProcessor { + return s +} diff --git a/typedapi/types/distancefeaturequery.go b/typedapi/types/distancefeaturequery.go index 1c04345c37..b3b3a7bf93 100644 --- a/typedapi/types/distancefeaturequery.go +++ b/typedapi/types/distancefeaturequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,9 @@ package types // GeoDistanceFeatureQuery // DateDistanceFeatureQuery // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L77-L85 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L76-L85 type DistanceFeatureQuery any + +type DistanceFeatureQueryVariant interface { + DistanceFeatureQueryCaster() *DistanceFeatureQuery +} diff --git a/typedapi/types/distancefeaturequerybasedatemathduration.go b/typedapi/types/distancefeaturequerybasedatemathduration.go deleted file mode 100644 index 267032b9c0..0000000000 --- a/typedapi/types/distancefeaturequerybasedatemathduration.go +++ /dev/null @@ -1,133 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// DistanceFeatureQueryBaseDateMathDuration type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L40-L60 -type DistanceFeatureQueryBaseDateMathDuration struct { - // Boost Floating point number used to decrease or increase the relevance scores of - // the query. - // Boost values are relative to the default value of 1.0. - // A boost value between 0 and 1.0 decreases the relevance score. - // A value greater than 1.0 increases the relevance score. - Boost *float32 `json:"boost,omitempty"` - // Field Name of the field used to calculate distances. This field must meet the - // following criteria: - // be a `date`, `date_nanos` or `geo_point` field; - // have an `index` mapping parameter value of `true`, which is the default; - // have an `doc_values` mapping parameter value of `true`, which is the default. - Field string `json:"field"` - // Origin Date or point of origin used to calculate distances. - // If the `field` value is a `date` or `date_nanos` field, the `origin` value - // must be a date. - // Date Math, such as `now-1h`, is supported. - // If the field value is a `geo_point` field, the `origin` value must be a - // geopoint. - Origin string `json:"origin"` - // Pivot Distance from the `origin` at which relevance scores receive half of the - // `boost` value. - // If the `field` value is a `date` or `date_nanos` field, the `pivot` value - // must be a time unit, such as `1h` or `10d`. If the `field` value is a - // `geo_point` field, the `pivot` value must be a distance unit, such as `1km` - // or `12m`. 
- Pivot Duration `json:"pivot"` - QueryName_ *string `json:"_name,omitempty"` -} - -func (s *DistanceFeatureQueryBaseDateMathDuration) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "boost": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 32) - if err != nil { - return fmt.Errorf("%s | %w", "Boost", err) - } - f := float32(value) - s.Boost = &f - case float64: - f := float32(v) - s.Boost = &f - } - - case "field": - if err := dec.Decode(&s.Field); err != nil { - return fmt.Errorf("%s | %w", "Field", err) - } - - case "origin": - if err := dec.Decode(&s.Origin); err != nil { - return fmt.Errorf("%s | %w", "Origin", err) - } - - case "pivot": - if err := dec.Decode(&s.Pivot); err != nil { - return fmt.Errorf("%s | %w", "Pivot", err) - } - - case "_name": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "QueryName_", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.QueryName_ = &o - - } - } - return nil -} - -// NewDistanceFeatureQueryBaseDateMathDuration returns a DistanceFeatureQueryBaseDateMathDuration. -func NewDistanceFeatureQueryBaseDateMathDuration() *DistanceFeatureQueryBaseDateMathDuration { - r := &DistanceFeatureQueryBaseDateMathDuration{} - - return r -} diff --git a/typedapi/types/distancefeaturequerybasegeolocationdistance.go b/typedapi/types/distancefeaturequerybasegeolocationdistance.go deleted file mode 100644 index d6bae3171c..0000000000 --- a/typedapi/types/distancefeaturequerybasegeolocationdistance.go +++ /dev/null @@ -1,173 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// DistanceFeatureQueryBaseGeoLocationDistance type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L40-L60 -type DistanceFeatureQueryBaseGeoLocationDistance struct { - // Boost Floating point number used to decrease or increase the relevance scores of - // the query. - // Boost values are relative to the default value of 1.0. - // A boost value between 0 and 1.0 decreases the relevance score. - // A value greater than 1.0 increases the relevance score. - Boost *float32 `json:"boost,omitempty"` - // Field Name of the field used to calculate distances. This field must meet the - // following criteria: - // be a `date`, `date_nanos` or `geo_point` field; - // have an `index` mapping parameter value of `true`, which is the default; - // have an `doc_values` mapping parameter value of `true`, which is the default. 
- Field string `json:"field"` - // Origin Date or point of origin used to calculate distances. - // If the `field` value is a `date` or `date_nanos` field, the `origin` value - // must be a date. - // Date Math, such as `now-1h`, is supported. - // If the field value is a `geo_point` field, the `origin` value must be a - // geopoint. - Origin GeoLocation `json:"origin"` - // Pivot Distance from the `origin` at which relevance scores receive half of the - // `boost` value. - // If the `field` value is a `date` or `date_nanos` field, the `pivot` value - // must be a time unit, such as `1h` or `10d`. If the `field` value is a - // `geo_point` field, the `pivot` value must be a distance unit, such as `1km` - // or `12m`. - Pivot string `json:"pivot"` - QueryName_ *string `json:"_name,omitempty"` -} - -func (s *DistanceFeatureQueryBaseGeoLocationDistance) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "boost": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 32) - if err != nil { - return fmt.Errorf("%s | %w", "Boost", err) - } - f := float32(value) - s.Boost = &f - case float64: - f := float32(v) - s.Boost = &f - } - - case "field": - if err := dec.Decode(&s.Field); err != nil { - return fmt.Errorf("%s | %w", "Field", err) - } - - case "origin": - message := json.RawMessage{} - if err := dec.Decode(&message); err != nil { - return fmt.Errorf("%s | %w", "Origin", err) - } - keyDec := json.NewDecoder(bytes.NewReader(message)) - origin_field: - for { - t, err := keyDec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return fmt.Errorf("%s | %w", "Origin", err) - } - - switch t { - - case "lat", "lon": - o := NewLatLonGeoLocation() - localDec := json.NewDecoder(bytes.NewReader(message)) - if err := localDec.Decode(&o); 
err != nil { - return fmt.Errorf("%s | %w", "Origin", err) - } - s.Origin = o - break origin_field - - case "geohash": - o := NewGeoHashLocation() - localDec := json.NewDecoder(bytes.NewReader(message)) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Origin", err) - } - s.Origin = o - break origin_field - - } - } - if s.Origin == nil { - localDec := json.NewDecoder(bytes.NewReader(message)) - if err := localDec.Decode(&s.Origin); err != nil { - return fmt.Errorf("%s | %w", "Origin", err) - } - } - - case "pivot": - if err := dec.Decode(&s.Pivot); err != nil { - return fmt.Errorf("%s | %w", "Pivot", err) - } - - case "_name": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "QueryName_", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.QueryName_ = &o - - } - } - return nil -} - -// NewDistanceFeatureQueryBaseGeoLocationDistance returns a DistanceFeatureQueryBaseGeoLocationDistance. -func NewDistanceFeatureQueryBaseGeoLocationDistance() *DistanceFeatureQueryBaseGeoLocationDistance { - r := &DistanceFeatureQueryBaseGeoLocationDistance{} - - return r -} diff --git a/typedapi/types/diversifiedsampleraggregation.go b/typedapi/types/diversifiedsampleraggregation.go index 04a05c0e98..054d1bc399 100644 --- a/typedapi/types/diversifiedsampleraggregation.go +++ b/typedapi/types/diversifiedsampleraggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // DiversifiedSamplerAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L322-L343 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L333-L357 type DiversifiedSamplerAggregation struct { // ExecutionHint The type of value used for de-duplication. ExecutionHint *sampleraggregationexecutionhint.SamplerAggregationExecutionHint `json:"execution_hint,omitempty"` @@ -120,3 +120,13 @@ func NewDiversifiedSamplerAggregation() *DiversifiedSamplerAggregation { return r } + +// true + +type DiversifiedSamplerAggregationVariant interface { + DiversifiedSamplerAggregationCaster() *DiversifiedSamplerAggregation +} + +func (s *DiversifiedSamplerAggregation) DiversifiedSamplerAggregationCaster() *DiversifiedSamplerAggregation { + return s +} diff --git a/typedapi/types/docstats.go b/typedapi/types/docstats.go index 19f44d8362..913e01d37e 100644 --- a/typedapi/types/docstats.go +++ b/typedapi/types/docstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DocStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L97-L109 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L100-L112 type DocStats struct { // Count Total number of non-deleted documents across all primary shards assigned to // selected nodes. 
@@ -102,3 +102,5 @@ func NewDocStats() *DocStats { return r } + +// false diff --git a/typedapi/types/document.go b/typedapi/types/document.go index 7a332b5964..fba298f4f8 100644 --- a/typedapi/types/document.go +++ b/typedapi/types/document.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // Document type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/simulate/types.ts#L41-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Simulation.ts#L62-L76 type Document struct { // Id_ Unique identifier for the document. // This ID must be unique within the `_index`. @@ -82,3 +82,13 @@ func NewDocument() *Document { return r } + +// true + +type DocumentVariant interface { + DocumentCaster() *Document +} + +func (s *Document) DocumentCaster() *Document { + return s +} diff --git a/typedapi/types/documentrating.go b/typedapi/types/documentrating.go index 96cfcdeb8b..bb6eb71ab3 100644 --- a/typedapi/types/documentrating.go +++ b/typedapi/types/documentrating.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DocumentRating type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L119-L126 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/types.ts#L119-L126 type DocumentRating struct { // Id_ The document ID. Id_ string `json:"_id"` @@ -94,3 +94,13 @@ func NewDocumentRating() *DocumentRating { return r } + +// true + +type DocumentRatingVariant interface { + DocumentRatingCaster() *DocumentRating +} + +func (s *DocumentRating) DocumentRatingCaster() *DocumentRating { + return s +} diff --git a/typedapi/types/documentsimulation.go b/typedapi/types/documentsimulation.go index 31d0aa5c12..9851b775c0 100644 --- a/typedapi/types/documentsimulation.go +++ b/typedapi/types/documentsimulation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,15 +33,15 @@ import ( // DocumentSimulation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/simulate/types.ts#L57-L87 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Simulation.ts#L78-L108 type DocumentSimulation struct { DocumentSimulation map[string]string `json:"-"` // Id_ Unique identifier for the document. This ID must be unique within the // `_index`. Id_ string `json:"_id"` // Index_ Name of the index containing the document. 
- Index_ string `json:"_index"` - Ingest_ SimulateIngest `json:"_ingest"` + Index_ string `json:"_index"` + Ingest_ Ingest `json:"_ingest"` // Routing_ Value used to send the document to a specific primary shard. Routing_ *string `json:"_routing,omitempty"` // Source_ JSON body for the document. @@ -160,9 +160,11 @@ func (s DocumentSimulation) MarshalJSON() ([]byte, error) { // NewDocumentSimulation returns a DocumentSimulation. func NewDocumentSimulation() *DocumentSimulation { r := &DocumentSimulation{ - DocumentSimulation: make(map[string]string, 0), - Source_: make(map[string]json.RawMessage, 0), + DocumentSimulation: make(map[string]string), + Source_: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/dotexpanderprocessor.go b/typedapi/types/dotexpanderprocessor.go index 9967217d24..1b279f572f 100644 --- a/typedapi/types/dotexpanderprocessor.go +++ b/typedapi/types/dotexpanderprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DotExpanderProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L592-L603 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L825-L843 type DotExpanderProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -45,6 +45,13 @@ type DotExpanderProcessor struct { IgnoreFailure *bool `json:"ignore_failure,omitempty"` // OnFailure Handle failures for the processor. 
OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Override Controls the behavior when there is already an existing nested object that + // conflicts with the expanded field. + // When `false`, the processor will merge conflicts by combining the old and the + // new values into an array. + // When `true`, the value from the expanded field will overwrite the existing + // value. + Override *bool `json:"override,omitempty"` // Path The field that contains the field to expand. // Only required if the field to expand is part another object field, because // the `field` option can only understand leaf fields. @@ -117,6 +124,20 @@ func (s *DotExpanderProcessor) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "OnFailure", err) } + case "override": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Override", err) + } + s.Override = &value + case bool: + s.Override = &v + } + case "path": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -152,3 +173,13 @@ func NewDotExpanderProcessor() *DotExpanderProcessor { return r } + +// true + +type DotExpanderProcessorVariant interface { + DotExpanderProcessorCaster() *DotExpanderProcessor +} + +func (s *DotExpanderProcessor) DotExpanderProcessorCaster() *DotExpanderProcessor { + return s +} diff --git a/typedapi/types/doublenumberproperty.go b/typedapi/types/doublenumberproperty.go index aaa1938a43..c778334921 100644 --- a/typedapi/types/doublenumberproperty.go +++ b/typedapi/types/doublenumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // DoubleNumberProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L152-L155 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L156-L159 type DoubleNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,13 +48,13 @@ type DoubleNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - NullValue *Float64 `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *Float64 `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -162,301 +163,313 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if 
err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -554,301 +567,313 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := 
NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -859,18 +884,6 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Script", err) } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -885,6 +898,11 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -932,8 +950,8 @@ func (s DoubleNumberProperty) MarshalJSON() ([]byte, error) { OnScriptError: s.OnScriptError, Properties: s.Properties, Script: s.Script, - Similarity: s.Similarity, Store: s.Store, + 
SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -947,10 +965,20 @@ func (s DoubleNumberProperty) MarshalJSON() ([]byte, error) { // NewDoubleNumberProperty returns a DoubleNumberProperty. func NewDoubleNumberProperty() *DoubleNumberProperty { r := &DoubleNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DoubleNumberPropertyVariant interface { + DoubleNumberPropertyCaster() *DoubleNumberProperty +} + +func (s *DoubleNumberProperty) DoubleNumberPropertyCaster() *DoubleNumberProperty { + return s +} diff --git a/typedapi/types/doublerangeproperty.go b/typedapi/types/doublerangeproperty.go index 05db02b046..e0cbba5f9f 100644 --- a/typedapi/types/doublerangeproperty.go +++ b/typedapi/types/doublerangeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // DoubleRangeProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/range.ts#L34-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/range.ts#L34-L36 type DoubleRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -44,11 +45,11 @@ type DoubleRangeProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { @@ -150,301 +151,313 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo 
:= NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } 
s.Fields[key] = oo } @@ -507,318 +520,318 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | 
%w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := 
NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -833,6 +846,11 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -847,19 +865,19 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { func (s 
DoubleRangeProperty) MarshalJSON() ([]byte, error) { type innerDoubleRangeProperty DoubleRangeProperty tmp := innerDoubleRangeProperty{ - Boost: s.Boost, - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "double_range" @@ -870,10 +888,20 @@ func (s DoubleRangeProperty) MarshalJSON() ([]byte, error) { // NewDoubleRangeProperty returns a DoubleRangeProperty. func NewDoubleRangeProperty() *DoubleRangeProperty { r := &DoubleRangeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DoubleRangePropertyVariant interface { + DoubleRangePropertyCaster() *DoubleRangeProperty +} + +func (s *DoubleRangeProperty) DoubleRangePropertyCaster() *DoubleRangeProperty { + return s +} diff --git a/typedapi/types/doubletermsaggregate.go b/typedapi/types/doubletermsaggregate.go index 5a9a2c3fbc..bb807bf0d2 100644 --- a/typedapi/types/doubletermsaggregate.go +++ b/typedapi/types/doubletermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DoubleTermsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L413-L418 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L451-L456 type DoubleTermsAggregate struct { Buckets BucketsDoubleTermsBucket `json:"buckets"` DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` @@ -121,3 +121,5 @@ func NewDoubleTermsAggregate() *DoubleTermsAggregate { return r } + +// false diff --git a/typedapi/types/doubletermsbucket.go b/typedapi/types/doubletermsbucket.go index 5d67f15197..98b8312e84 100644 --- a/typedapi/types/doubletermsbucket.go +++ b/typedapi/types/doubletermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // DoubleTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L420-L423 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L458-L461 type DoubleTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -531,6 +531,13 @@ func (s *DoubleTermsBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -664,8 +671,10 @@ func (s DoubleTermsBucket) MarshalJSON() ([]byte, error) { // NewDoubleTermsBucket returns a DoubleTermsBucket. func NewDoubleTermsBucket() *DoubleTermsBucket { r := &DoubleTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/downsampleaction.go b/typedapi/types/downsampleaction.go index 5c4d21fcab..012292bfa1 100644 --- a/typedapi/types/downsampleaction.go +++ b/typedapi/types/downsampleaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DownsampleAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Phase.ts#L115-L118 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Phase.ts#L112-L115 type DownsampleAction struct { FixedInterval string `json:"fixed_interval"` WaitTimeout Duration `json:"wait_timeout,omitempty"` @@ -72,3 +72,13 @@ func NewDownsampleAction() *DownsampleAction { return r } + +// true + +type DownsampleActionVariant interface { + DownsampleActionCaster() *DownsampleAction +} + +func (s *DownsampleAction) DownsampleActionCaster() *DownsampleAction { + return s +} diff --git a/typedapi/types/downsampleconfig.go b/typedapi/types/downsampleconfig.go index 7a69e69e62..8afd2dd791 100644 --- a/typedapi/types/downsampleconfig.go +++ b/typedapi/types/downsampleconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DownsampleConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/Downsample.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/Downsample.ts#L22-L27 type DownsampleConfig struct { // FixedInterval The interval at which to aggregate the original time series index. 
FixedInterval string `json:"fixed_interval"` @@ -67,3 +67,13 @@ func NewDownsampleConfig() *DownsampleConfig { return r } + +// true + +type DownsampleConfigVariant interface { + DownsampleConfigCaster() *DownsampleConfig +} + +func (s *DownsampleConfig) DownsampleConfigCaster() *DownsampleConfig { + return s +} diff --git a/typedapi/types/downsamplinground.go b/typedapi/types/downsamplinground.go index 106f02d04a..0f9c0f8176 100644 --- a/typedapi/types/downsamplinground.go +++ b/typedapi/types/downsamplinground.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // DownsamplingRound type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/DownsamplingRound.ts#L23-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/DownsamplingRound.ts#L23-L32 type DownsamplingRound struct { // After The duration since rollover when this downsampling round should execute After Duration `json:"after"` @@ -74,3 +74,13 @@ func NewDownsamplingRound() *DownsamplingRound { return r } + +// true + +type DownsamplingRoundVariant interface { + DownsamplingRoundCaster() *DownsamplingRound +} + +func (s *DownsamplingRound) DownsamplingRoundCaster() *DownsamplingRound { + return s +} diff --git a/typedapi/types/dropprocessor.go b/typedapi/types/dropprocessor.go index 25b3cc8daa..bebb698e7b 100644 --- a/typedapi/types/dropprocessor.go +++ b/typedapi/types/dropprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // DropProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L605-L605 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L845-L845 type DropProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -128,3 +128,13 @@ func NewDropProcessor() *DropProcessor { return r } + +// true + +type DropProcessorVariant interface { + DropProcessorCaster() *DropProcessor +} + +func (s *DropProcessor) DropProcessorCaster() *DropProcessor { + return s +} diff --git a/typedapi/types/duration.go b/typedapi/types/duration.go index 156c7aa194..a36b72c67c 100644 --- a/typedapi/types/duration.go +++ b/typedapi/types/duration.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -24,5 +24,9 @@ package types // // string // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Time.ts#L52-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Time.ts#L52-L58 type Duration any + +type DurationVariant interface { + DurationCaster() *Duration +} diff --git a/typedapi/types/durationvalueunitfloatmillis.go b/typedapi/types/durationvalueunitfloatmillis.go index 3cd294ad7d..624e495a6a 100644 --- a/typedapi/types/durationvalueunitfloatmillis.go +++ b/typedapi/types/durationvalueunitfloatmillis.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // DurationValueUnitFloatMillis type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Time.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Time.ts#L67-L67 type DurationValueUnitFloatMillis Float64 diff --git a/typedapi/types/durationvalueunitmillis.go b/typedapi/types/durationvalueunitmillis.go index d7c9da91f3..49b2c07b4b 100644 --- a/typedapi/types/durationvalueunitmillis.go +++ b/typedapi/types/durationvalueunitmillis.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // DurationValueUnitMillis type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Time.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Time.ts#L67-L67 type DurationValueUnitMillis int64 + +type DurationValueUnitMillisVariant interface { + DurationValueUnitMillisCaster() *DurationValueUnitMillis +} diff --git a/typedapi/types/durationvalueunitnanos.go b/typedapi/types/durationvalueunitnanos.go index a483fb2487..4576b393e0 100644 --- a/typedapi/types/durationvalueunitnanos.go +++ b/typedapi/types/durationvalueunitnanos.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // DurationValueUnitNanos type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Time.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Time.ts#L67-L67 type DurationValueUnitNanos int64 diff --git a/typedapi/types/durationvalueunitseconds.go b/typedapi/types/durationvalueunitseconds.go index 001a2fb539..dc2ea2f7b0 100644 --- a/typedapi/types/durationvalueunitseconds.go +++ b/typedapi/types/durationvalueunitseconds.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // DurationValueUnitSeconds type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Time.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Time.ts#L67-L67 type DurationValueUnitSeconds int64 diff --git a/typedapi/types/dutchanalyzer.go b/typedapi/types/dutchanalyzer.go index 617ef0b8b4..642e85cd9a 100644 --- a/typedapi/types/dutchanalyzer.go +++ b/typedapi/types/dutchanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,14 +26,17 @@ import ( "errors" "fmt" "io" + "strconv" ) // DutchAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/analyzers.ts#L61-L64 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L125-L130 type DutchAnalyzer struct { - Stopwords []string `json:"stopwords,omitempty"` - Type string `json:"type,omitempty"` + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` } func (s *DutchAnalyzer) UnmarshalJSON(data []byte) error { @@ -51,6 +54,11 @@ func (s *DutchAnalyzer) UnmarshalJSON(data []byte) error { switch t { + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + case "stopwords": rawMsg := json.RawMessage{} dec.Decode(&rawMsg) @@ -67,6 +75,18 @@ func (s *DutchAnalyzer) UnmarshalJSON(data []byte) error { } } + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -81,8 +101,10 @@ func (s *DutchAnalyzer) UnmarshalJSON(data []byte) error { func (s DutchAnalyzer) MarshalJSON() ([]byte, error) { type innerDutchAnalyzer DutchAnalyzer tmp := innerDutchAnalyzer{ - Stopwords: s.Stopwords, - Type: s.Type, + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, } tmp.Type = "dutch" @@ -96,3 +118,13 @@ func NewDutchAnalyzer() *DutchAnalyzer { return r } + +// true + +type DutchAnalyzerVariant interface { + DutchAnalyzerCaster() *DutchAnalyzer +} + +func (s 
*DutchAnalyzer) DutchAnalyzerCaster() *DutchAnalyzer { + return s +} diff --git a/typedapi/types/dynamicproperty.go b/typedapi/types/dynamicproperty.go index cf12c0cf61..7e9d5dfa95 100644 --- a/typedapi/types/dynamicproperty.go +++ b/typedapi/types/dynamicproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,13 +31,14 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // DynamicProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L300-L331 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L318-L349 type DynamicProperty struct { Analyzer *string `json:"analyzer,omitempty"` Boost *Float64 `json:"boost,omitempty"` @@ -57,21 +58,21 @@ type DynamicProperty struct { IndexPrefixes *TextIndexPrefixes `json:"index_prefixes,omitempty"` Locale *string `json:"locale,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Norms *bool `json:"norms,omitempty"` - NullValue FieldValue `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - PositionIncrementGap *int `json:"position_increment_gap,omitempty"` - PrecisionStep *int `json:"precision_step,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - SearchAnalyzer *string `json:"search_analyzer,omitempty"` - SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` - TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Norms *bool `json:"norms,omitempty"` + NullValue FieldValue `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + PositionIncrementGap *int `json:"position_increment_gap,omitempty"` + PrecisionStep *int `json:"precision_step,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + SearchAnalyzer *string `json:"search_analyzer,omitempty"` + SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` + TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type,omitempty"` } func (s *DynamicProperty) UnmarshalJSON(data []byte) error { @@ -213,301 +214,313 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { case 
"binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo 
case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields 
| %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -688,301 +701,313 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := 
NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -1017,18 +1042,6 @@ func (s *DynamicProperty) UnmarshalJSON(data 
[]byte) error { } s.SearchQuoteAnalyzer = &o - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -1043,6 +1056,11 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "term_vector": if err := dec.Decode(&s.TermVector); err != nil { return fmt.Errorf("%s | %w", "TermVector", err) @@ -1094,8 +1112,8 @@ func (s DynamicProperty) MarshalJSON() ([]byte, error) { Script: s.Script, SearchAnalyzer: s.SearchAnalyzer, SearchQuoteAnalyzer: s.SearchQuoteAnalyzer, - Similarity: s.Similarity, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TermVector: s.TermVector, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -1109,10 +1127,20 @@ func (s DynamicProperty) MarshalJSON() ([]byte, error) { // NewDynamicProperty returns a DynamicProperty. func NewDynamicProperty() *DynamicProperty { r := &DynamicProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type DynamicPropertyVariant interface { + DynamicPropertyCaster() *DynamicProperty +} + +func (s *DynamicProperty) DynamicPropertyCaster() *DynamicProperty { + return s +} diff --git a/typedapi/types/dynamictemplate.go b/typedapi/types/dynamictemplate.go index 8c7a5b7923..95769931ea 100644 --- a/typedapi/types/dynamictemplate.go +++ b/typedapi/types/dynamictemplate.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,17 +32,18 @@ import ( // DynamicTemplate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/dynamic-template.ts#L22-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/dynamic-template.ts#L23-L43 type DynamicTemplate struct { - Mapping Property `json:"mapping,omitempty"` - Match []string `json:"match,omitempty"` - MatchMappingType []string `json:"match_mapping_type,omitempty"` - MatchPattern *matchtype.MatchType `json:"match_pattern,omitempty"` - PathMatch []string `json:"path_match,omitempty"` - PathUnmatch []string `json:"path_unmatch,omitempty"` - Runtime Property `json:"runtime,omitempty"` - Unmatch []string `json:"unmatch,omitempty"` - UnmatchMappingType []string `json:"unmatch_mapping_type,omitempty"` + AdditionalDynamicTemplateProperty map[string]json.RawMessage `json:"-"` + Mapping Property `json:"mapping,omitempty"` + Match []string `json:"match,omitempty"` + MatchMappingType []string `json:"match_mapping_type,omitempty"` + MatchPattern *matchtype.MatchType `json:"match_pattern,omitempty"` + PathMatch []string `json:"path_match,omitempty"` + PathUnmatch []string `json:"path_unmatch,omitempty"` + Runtime *RuntimeField `json:"runtime,omitempty"` + Unmatch []string `json:"unmatch,omitempty"` + UnmatchMappingType []string `json:"unmatch_mapping_type,omitempty"` } func (s *DynamicTemplate) UnmarshalJSON(data []byte) error { @@ -77,300 +78,312 @@ func (s *DynamicTemplate) UnmarshalJSON(data []byte) error { case "binary": o := NewBinaryProperty() if err := localDec.Decode(&o); err != nil { - 
return err + return fmt.Errorf("%s | %w", "binary", err) } s.Mapping = *o case "boolean": o := NewBooleanProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "boolean", err) } s.Mapping = *o case "{dynamic_type}": o := NewDynamicProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "{dynamic_type}", err) } s.Mapping = *o case "join": o := NewJoinProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "join", err) } s.Mapping = *o case "keyword": o := NewKeywordProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "keyword", err) } s.Mapping = *o case "match_only_text": o := NewMatchOnlyTextProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "match_only_text", err) } s.Mapping = *o case "percolator": o := NewPercolatorProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "percolator", err) } s.Mapping = *o case "rank_feature": o := NewRankFeatureProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "rank_feature", err) } s.Mapping = *o case "rank_features": o := NewRankFeaturesProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "rank_features", err) } s.Mapping = *o case "search_as_you_type": o := NewSearchAsYouTypeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "search_as_you_type", err) } s.Mapping = *o case "text": o := NewTextProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "text", err) } s.Mapping = *o case "version": o := NewVersionProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "version", err) } s.Mapping = *o case "wildcard": o := NewWildcardProperty() 
if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "wildcard", err) } s.Mapping = *o case "date_nanos": o := NewDateNanosProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "date_nanos", err) } s.Mapping = *o case "date": o := NewDateProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "date", err) } s.Mapping = *o case "aggregate_metric_double": o := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "aggregate_metric_double", err) } s.Mapping = *o case "dense_vector": o := NewDenseVectorProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "dense_vector", err) } s.Mapping = *o case "flattened": o := NewFlattenedProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "flattened", err) } s.Mapping = *o case "nested": o := NewNestedProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "nested", err) } s.Mapping = *o case "object": o := NewObjectProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "object", err) + } + s.Mapping = *o + case "passthrough": + o := NewPassthroughObjectProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "passthrough", err) } s.Mapping = *o case "semantic_text": o := NewSemanticTextProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "semantic_text", err) } s.Mapping = *o case "sparse_vector": o := NewSparseVectorProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "sparse_vector", err) } s.Mapping = *o case "completion": o := NewCompletionProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", 
"completion", err) } s.Mapping = *o case "constant_keyword": o := NewConstantKeywordProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "constant_keyword", err) + } + s.Mapping = *o + case "counted_keyword": + o := NewCountedKeywordProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "counted_keyword", err) } s.Mapping = *o case "alias": o := NewFieldAliasProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "alias", err) } s.Mapping = *o case "histogram": o := NewHistogramProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "histogram", err) } s.Mapping = *o case "ip": o := NewIpProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ip", err) } s.Mapping = *o case "murmur3": o := NewMurmur3HashProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "murmur3", err) } s.Mapping = *o case "token_count": o := NewTokenCountProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "token_count", err) } s.Mapping = *o case "geo_point": o := NewGeoPointProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "geo_point", err) } s.Mapping = *o case "geo_shape": o := NewGeoShapeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "geo_shape", err) } s.Mapping = *o case "point": o := NewPointProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "point", err) } s.Mapping = *o case "shape": o := NewShapeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "shape", err) } s.Mapping = *o case "byte": o := NewByteNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return 
fmt.Errorf("%s | %w", "byte", err) } s.Mapping = *o case "double": o := NewDoubleNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "double", err) } s.Mapping = *o case "float": o := NewFloatNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "float", err) } s.Mapping = *o case "half_float": o := NewHalfFloatNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "half_float", err) } s.Mapping = *o case "integer": o := NewIntegerNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "integer", err) } s.Mapping = *o case "long": o := NewLongNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "long", err) } s.Mapping = *o case "scaled_float": o := NewScaledFloatNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "scaled_float", err) } s.Mapping = *o case "short": o := NewShortNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "short", err) } s.Mapping = *o case "unsigned_long": o := NewUnsignedLongNumberProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "unsigned_long", err) } s.Mapping = *o case "date_range": o := NewDateRangeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "date_range", err) } s.Mapping = *o case "double_range": o := NewDoubleRangeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "double_range", err) } s.Mapping = *o case "float_range": o := NewFloatRangeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "float_range", err) } s.Mapping = *o case "integer_range": o := NewIntegerRangeProperty() if 
err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "integer_range", err) } s.Mapping = *o case "ip_range": o := NewIpRangeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "ip_range", err) } s.Mapping = *o case "long_range": o := NewLongRangeProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "long_range", err) } s.Mapping = *o case "icu_collation_keyword": o := NewIcuCollationProperty() if err := localDec.Decode(&o); err != nil { - return err + return fmt.Errorf("%s | %w", "icu_collation_keyword", err) } s.Mapping = *o default: if err := localDec.Decode(&s.Mapping); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } } @@ -444,317 +457,8 @@ func (s *DynamicTemplate) UnmarshalJSON(data []byte) error { } case "runtime": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - kind := make(map[string]string, 0) - localDec := json.NewDecoder(source) - localDec.Decode(&kind) - source.Seek(0, io.SeekStart) - if _, ok := kind["type"]; !ok { - kind["type"] = "object" - } - switch kind["type"] { - - case "binary": - o := NewBinaryProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "boolean": - o := NewBooleanProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "{dynamic_type}": - o := NewDynamicProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "join": - o := NewJoinProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "keyword": - o := NewKeywordProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "match_only_text": - o := NewMatchOnlyTextProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "percolator": - o := 
NewPercolatorProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "rank_feature": - o := NewRankFeatureProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "rank_features": - o := NewRankFeaturesProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "search_as_you_type": - o := NewSearchAsYouTypeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "text": - o := NewTextProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "version": - o := NewVersionProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "wildcard": - o := NewWildcardProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "date_nanos": - o := NewDateNanosProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "date": - o := NewDateProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "aggregate_metric_double": - o := NewAggregateMetricDoubleProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "dense_vector": - o := NewDenseVectorProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "flattened": - o := NewFlattenedProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "nested": - o := NewNestedProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "object": - o := NewObjectProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "semantic_text": - o := NewSemanticTextProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "sparse_vector": - o := 
NewSparseVectorProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "completion": - o := NewCompletionProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "constant_keyword": - o := NewConstantKeywordProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "alias": - o := NewFieldAliasProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "histogram": - o := NewHistogramProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "ip": - o := NewIpProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "murmur3": - o := NewMurmur3HashProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "token_count": - o := NewTokenCountProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "geo_point": - o := NewGeoPointProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "geo_shape": - o := NewGeoShapeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "point": - o := NewPointProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "shape": - o := NewShapeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "byte": - o := NewByteNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "double": - o := NewDoubleNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "float": - o := NewFloatNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "half_float": - o := NewHalfFloatNumberProperty() - if err := localDec.Decode(&o); err != 
nil { - return err - } - s.Runtime = *o - case "integer": - o := NewIntegerNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "long": - o := NewLongNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "scaled_float": - o := NewScaledFloatNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "short": - o := NewShortNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "unsigned_long": - o := NewUnsignedLongNumberProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "date_range": - o := NewDateRangeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "double_range": - o := NewDoubleRangeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "float_range": - o := NewFloatRangeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "integer_range": - o := NewIntegerRangeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "ip_range": - o := NewIpRangeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "long_range": - o := NewLongRangeProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - case "icu_collation_keyword": - o := NewIcuCollationProperty() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Runtime = *o - default: - if err := localDec.Decode(&s.Runtime); err != nil { - return err - } + if err := dec.Decode(&s.Runtime); err != nil { + return fmt.Errorf("%s | %w", "Runtime", err) } case "unmatch": @@ -789,14 +493,68 @@ func (s *DynamicTemplate) UnmarshalJSON(data []byte) error { } } + default: + + if key, ok := t.(string); ok { + if 
s.AdditionalDynamicTemplateProperty == nil { + s.AdditionalDynamicTemplateProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalDynamicTemplateProperty", err) + } + s.AdditionalDynamicTemplateProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s DynamicTemplate) MarshalJSON() ([]byte, error) { + type opt DynamicTemplate + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDynamicTemplateProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDynamicTemplateProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewDynamicTemplate returns a DynamicTemplate. func NewDynamicTemplate() *DynamicTemplate { - r := &DynamicTemplate{} + r := &DynamicTemplate{ + AdditionalDynamicTemplateProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type DynamicTemplateVariant interface { + DynamicTemplateCaster() *DynamicTemplate +} + +func (s *DynamicTemplate) DynamicTemplateCaster() *DynamicTemplate { + return s +} diff --git a/typedapi/types/edgengramtokenfilter.go b/typedapi/types/edgengramtokenfilter.go index c3ccb552a4..3d08c19db3 100644 --- a/typedapi/types/edgengramtokenfilter.go +++ b/typedapi/types/edgengramtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // EdgeNGramTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L79-L85 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L78-L84 type EdgeNGramTokenFilter struct { MaxGram *int `json:"max_gram,omitempty"` MinGram *int `json:"min_gram,omitempty"` @@ -138,3 +138,13 @@ func NewEdgeNGramTokenFilter() *EdgeNGramTokenFilter { return r } + +// true + +type EdgeNGramTokenFilterVariant interface { + EdgeNGramTokenFilterCaster() *EdgeNGramTokenFilter +} + +func (s *EdgeNGramTokenFilter) EdgeNGramTokenFilterCaster() *EdgeNGramTokenFilter { + return s +} diff --git a/typedapi/types/edgengramtokenizer.go b/typedapi/types/edgengramtokenizer.go index b0691c7038..5b505c67f0 100644 --- a/typedapi/types/edgengramtokenizer.go +++ b/typedapi/types/edgengramtokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,12 +33,12 @@ import ( // EdgeNGramTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L31-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L48-L57 type EdgeNGramTokenizer struct { CustomTokenChars *string `json:"custom_token_chars,omitempty"` - MaxGram int `json:"max_gram"` - MinGram int `json:"min_gram"` - TokenChars []tokenchar.TokenChar `json:"token_chars"` + MaxGram *int `json:"max_gram,omitempty"` + MinGram *int `json:"min_gram,omitempty"` + TokenChars []tokenchar.TokenChar `json:"token_chars,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } @@ -80,10 +80,10 @@ func (s *EdgeNGramTokenizer) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "MaxGram", err) } - s.MaxGram = value + s.MaxGram = &value case float64: f := int(v) - s.MaxGram = f + s.MaxGram = &f } case "min_gram": @@ -96,10 +96,10 @@ func (s *EdgeNGramTokenizer) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "MinGram", err) } - s.MinGram = value + s.MinGram = &value case float64: f := int(v) - s.MinGram = f + s.MinGram = &f } case "token_chars": @@ -145,3 +145,13 @@ func NewEdgeNGramTokenizer() *EdgeNGramTokenizer { return r } + +// true + +type EdgeNGramTokenizerVariant interface { + EdgeNGramTokenizerCaster() *EdgeNGramTokenizer +} + +func (s *EdgeNGramTokenizer) EdgeNGramTokenizerCaster() *EdgeNGramTokenizer { + return s +} diff --git a/typedapi/types/elasticsearchversioninfo.go b/typedapi/types/elasticsearchversioninfo.go index 852c3a55ac..d530933e5e 100644 --- a/typedapi/types/elasticsearchversioninfo.go +++ b/typedapi/types/elasticsearchversioninfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,17 +31,28 @@ import ( // ElasticsearchVersionInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Base.ts#L54-L64 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Base.ts#L76-L115 type ElasticsearchVersionInfo struct { - BuildDate DateTime `json:"build_date"` - BuildFlavor string `json:"build_flavor"` - BuildHash string `json:"build_hash"` - BuildSnapshot bool `json:"build_snapshot"` - BuildType string `json:"build_type"` - Int string `json:"number"` - LuceneVersion string `json:"lucene_version"` - MinimumIndexCompatibilityVersion string `json:"minimum_index_compatibility_version"` - MinimumWireCompatibilityVersion string `json:"minimum_wire_compatibility_version"` + // BuildDate The Elasticsearch Git commit's date. + BuildDate DateTime `json:"build_date"` + // BuildFlavor The build flavor. For example, `default`. + BuildFlavor string `json:"build_flavor"` + // BuildHash The Elasticsearch Git commit's SHA hash. + BuildHash string `json:"build_hash"` + // BuildSnapshot Indicates whether the Elasticsearch build was a snapshot. + BuildSnapshot bool `json:"build_snapshot"` + // BuildType The build type that corresponds to how Elasticsearch was installed. + // For example, `docker`, `rpm`, or `tar`. + BuildType string `json:"build_type"` + // Int The Elasticsearch version number. + Int string `json:"number"` + // LuceneVersion The version number of Elasticsearch's underlying Lucene software. + LuceneVersion string `json:"lucene_version"` + // MinimumIndexCompatibilityVersion The minimum index version with which the responding node can read from disk. 
+ MinimumIndexCompatibilityVersion string `json:"minimum_index_compatibility_version"` + // MinimumWireCompatibilityVersion The minimum node version with which the responding node can communicate. + // Also the minimum version from which you can perform a rolling upgrade. + MinimumWireCompatibilityVersion string `json:"minimum_wire_compatibility_version"` } func (s *ElasticsearchVersionInfo) UnmarshalJSON(data []byte) error { @@ -152,3 +163,5 @@ func NewElasticsearchVersionInfo() *ElasticsearchVersionInfo { return r } + +// false diff --git a/typedapi/types/elasticsearchversionmininfo.go b/typedapi/types/elasticsearchversionmininfo.go index 63249c54fe..5938202357 100644 --- a/typedapi/types/elasticsearchversionmininfo.go +++ b/typedapi/types/elasticsearchversionmininfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ElasticsearchVersionMinInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Base.ts#L66-L74 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Base.ts#L117-L125 type ElasticsearchVersionMinInfo struct { BuildFlavor string `json:"build_flavor"` Int string `json:"number"` @@ -99,3 +99,5 @@ func NewElasticsearchVersionMinInfo() *ElasticsearchVersionMinInfo { return r } + +// false diff --git a/typedapi/types/elisiontokenfilter.go b/typedapi/types/elisiontokenfilter.go index 4ac6893ef9..ad45bd3b74 100644 --- a/typedapi/types/elisiontokenfilter.go +++ b/typedapi/types/elisiontokenfilter.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ElisionTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L189-L194 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L188-L193 type ElisionTokenFilter struct { Articles []string `json:"articles,omitempty"` ArticlesCase Stringifiedboolean `json:"articles_case,omitempty"` @@ -114,3 +114,13 @@ func NewElisionTokenFilter() *ElisionTokenFilter { return r } + +// true + +type ElisionTokenFilterVariant interface { + ElisionTokenFilterCaster() *ElisionTokenFilter +} + +func (s *ElisionTokenFilter) ElisionTokenFilterCaster() *ElisionTokenFilter { + return s +} diff --git a/typedapi/types/email.go b/typedapi/types/email.go index 74e75c6a8b..6b7177464e 100644 --- a/typedapi/types/email.go +++ b/typedapi/types/email.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // Email type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L238-L250 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L238-L250 type Email struct { Attachments map[string]EmailAttachmentContainer `json:"attachments,omitempty"` Bcc []string `json:"bcc,omitempty"` @@ -72,8 +72,19 @@ func (s *Email) UnmarshalJSON(data []byte) error { } case "bcc": - if err := dec.Decode(&s.Bcc); err != nil { - return fmt.Errorf("%s | %w", "Bcc", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bcc", err) + } + + s.Bcc = append(s.Bcc, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Bcc); err != nil { + return fmt.Errorf("%s | %w", "Bcc", err) + } } case "body": @@ -82,8 +93,19 @@ func (s *Email) UnmarshalJSON(data []byte) error { } case "cc": - if err := dec.Decode(&s.Cc); err != nil { - return fmt.Errorf("%s | %w", "Cc", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Cc", err) + } + + s.Cc = append(s.Cc, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Cc); err != nil { + return fmt.Errorf("%s | %w", "Cc", err) + } } case "from": @@ -109,8 +131,19 @@ func (s *Email) UnmarshalJSON(data []byte) error { } case "reply_to": - if err := dec.Decode(&s.ReplyTo); err != nil { - return fmt.Errorf("%s | %w", "ReplyTo", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ReplyTo", err) + } + + s.ReplyTo = append(s.ReplyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ReplyTo); err != nil { + return fmt.Errorf("%s | %w", "ReplyTo", err) + } } case "sent_date": @@ -131,8 +164,19 @@ func (s *Email) UnmarshalJSON(data []byte) error { s.Subject = o case "to": - if err := dec.Decode(&s.To); err != nil { - return fmt.Errorf("%s | %w", "To", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } + + s.To = append(s.To, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.To); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } } } @@ -143,8 +187,10 @@ func (s *Email) UnmarshalJSON(data []byte) error { // NewEmail returns a Email. func NewEmail() *Email { r := &Email{ - Attachments: make(map[string]EmailAttachmentContainer, 0), + Attachments: make(map[string]EmailAttachmentContainer), } return r } + +// false diff --git a/typedapi/types/emailaction.go b/typedapi/types/emailaction.go index 08a7880f47..bba3968c5e 100644 --- a/typedapi/types/emailaction.go +++ b/typedapi/types/emailaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // EmailAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L252-L252 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L252-L252 type EmailAction struct { Attachments map[string]EmailAttachmentContainer `json:"attachments,omitempty"` Bcc []string `json:"bcc,omitempty"` @@ -72,8 +72,19 @@ func (s *EmailAction) UnmarshalJSON(data []byte) error { } case "bcc": - if err := dec.Decode(&s.Bcc); err != nil { - return fmt.Errorf("%s | %w", "Bcc", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bcc", err) + } + + s.Bcc = append(s.Bcc, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Bcc); err != nil { + return fmt.Errorf("%s | %w", "Bcc", err) + } } case "body": @@ -82,8 +93,19 @@ func (s *EmailAction) UnmarshalJSON(data []byte) error { } case "cc": - if err := dec.Decode(&s.Cc); err != nil { - return fmt.Errorf("%s | %w", "Cc", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Cc", err) + } + + s.Cc = append(s.Cc, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Cc); err != nil { + return fmt.Errorf("%s | %w", "Cc", err) + } } case "from": @@ -109,8 +131,19 @@ func (s *EmailAction) UnmarshalJSON(data []byte) error { } case "reply_to": - if err := dec.Decode(&s.ReplyTo); err != nil { - return fmt.Errorf("%s | %w", "ReplyTo", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ReplyTo", err) + } + + s.ReplyTo = append(s.ReplyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ReplyTo); err != nil { + return fmt.Errorf("%s | %w", "ReplyTo", err) + } } case "sent_date": @@ -131,8 +164,19 @@ func (s *EmailAction) UnmarshalJSON(data []byte) error { s.Subject = o case "to": - if err := dec.Decode(&s.To); err != nil { - return fmt.Errorf("%s | %w", "To", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } + + s.To = append(s.To, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.To); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } } } @@ -143,8 +187,18 @@ func (s *EmailAction) UnmarshalJSON(data []byte) error { // NewEmailAction returns a EmailAction. func NewEmailAction() *EmailAction { r := &EmailAction{ - Attachments: make(map[string]EmailAttachmentContainer, 0), + Attachments: make(map[string]EmailAttachmentContainer), } return r } + +// true + +type EmailActionVariant interface { + EmailActionCaster() *EmailAction +} + +func (s *EmailAction) EmailActionCaster() *EmailAction { + return s +} diff --git a/typedapi/types/emailattachmentcontainer.go b/typedapi/types/emailattachmentcontainer.go index 14b11c7dea..6329556a4e 100644 --- a/typedapi/types/emailattachmentcontainer.go +++ b/typedapi/types/emailattachmentcontainer.go @@ -16,22 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // EmailAttachmentContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L211-L216 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L211-L216 type EmailAttachmentContainer struct { - Data *DataEmailAttachment `json:"data,omitempty"` - Http *HttpEmailAttachment `json:"http,omitempty"` - Reporting *ReportingEmailAttachment `json:"reporting,omitempty"` + AdditionalEmailAttachmentContainerProperty map[string]json.RawMessage `json:"-"` + Data *DataEmailAttachment `json:"data,omitempty"` + Http *HttpEmailAttachment `json:"http,omitempty"` + Reporting *ReportingEmailAttachment `json:"reporting,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s EmailAttachmentContainer) MarshalJSON() ([]byte, error) { + type opt EmailAttachmentContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalEmailAttachmentContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalEmailAttachmentContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewEmailAttachmentContainer returns a EmailAttachmentContainer. 
func NewEmailAttachmentContainer() *EmailAttachmentContainer { - r := &EmailAttachmentContainer{} + r := &EmailAttachmentContainer{ + AdditionalEmailAttachmentContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type EmailAttachmentContainerVariant interface { + EmailAttachmentContainerCaster() *EmailAttachmentContainer +} + +func (s *EmailAttachmentContainer) EmailAttachmentContainerCaster() *EmailAttachmentContainer { + return s +} diff --git a/typedapi/types/emailbody.go b/typedapi/types/emailbody.go index 5c59aea0c8..2324ca31e7 100644 --- a/typedapi/types/emailbody.go +++ b/typedapi/types/emailbody.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // EmailBody type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L192-L195 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L192-L195 type EmailBody struct { Html *string `json:"html,omitempty"` Text *string `json:"text,omitempty"` @@ -87,3 +87,13 @@ func NewEmailBody() *EmailBody { return r } + +// true + +type EmailBodyVariant interface { + EmailBodyCaster() *EmailBody +} + +func (s *EmailBody) EmailBodyCaster() *EmailBody { + return s +} diff --git a/typedapi/types/emailresult.go b/typedapi/types/emailresult.go index b4d97b58ff..38a57a5f01 100644 --- a/typedapi/types/emailresult.go +++ b/typedapi/types/emailresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // EmailResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L205-L209 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L205-L209 type EmailResult struct { Account *string `json:"account,omitempty"` Message Email `json:"message"` @@ -93,3 +93,5 @@ func NewEmailResult() *EmailResult { return r } + +// false diff --git a/typedapi/types/emptyobject.go b/typedapi/types/emptyobject.go index 8f2a426800..125d60d7ad 100644 --- a/typedapi/types/emptyobject.go +++ b/typedapi/types/emptyobject.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // EmptyObject type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L160-L161 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L164-L165 type EmptyObject struct { } @@ -32,3 +32,13 @@ func NewEmptyObject() *EmptyObject { return r } + +// true + +type EmptyObjectVariant interface { + EmptyObjectCaster() *EmptyObject +} + +func (s *EmptyObject) EmptyObjectCaster() *EmptyObject { + return s +} diff --git a/typedapi/types/englishanalyzer.go b/typedapi/types/englishanalyzer.go new file mode 100644 index 0000000000..b1c0016b73 --- /dev/null +++ b/typedapi/types/englishanalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// EnglishAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L132-L137 +type EnglishAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *EnglishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s EnglishAnalyzer) MarshalJSON() ([]byte, error) { + type innerEnglishAnalyzer EnglishAnalyzer + tmp := innerEnglishAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "english" + + 
return json.Marshal(tmp) +} + +// NewEnglishAnalyzer returns a EnglishAnalyzer. +func NewEnglishAnalyzer() *EnglishAnalyzer { + r := &EnglishAnalyzer{} + + return r +} + +// true + +type EnglishAnalyzerVariant interface { + EnglishAnalyzerCaster() *EnglishAnalyzer +} + +func (s *EnglishAnalyzer) EnglishAnalyzerCaster() *EnglishAnalyzer { + return s +} diff --git a/typedapi/types/enrichpolicy.go b/typedapi/types/enrichpolicy.go index 12ccf3d743..80a5e55bba 100644 --- a/typedapi/types/enrichpolicy.go +++ b/typedapi/types/enrichpolicy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // EnrichPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/_types/Policy.ts#L34-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/_types/Policy.ts#L34-L41 type EnrichPolicy struct { ElasticsearchVersion *string `json:"elasticsearch_version,omitempty"` EnrichFields []string `json:"enrich_fields"` @@ -126,3 +126,13 @@ func NewEnrichPolicy() *EnrichPolicy { return r } + +// true + +type EnrichPolicyVariant interface { + EnrichPolicyCaster() *EnrichPolicy +} + +func (s *EnrichPolicy) EnrichPolicyCaster() *EnrichPolicy { + return s +} diff --git a/typedapi/types/enrichprocessor.go b/typedapi/types/enrichprocessor.go index 4f4549264b..826cda751b 100644 --- a/typedapi/types/enrichprocessor.go +++ b/typedapi/types/enrichprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // EnrichProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L607-L646 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L847-L886 type EnrichProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -228,3 +228,13 @@ func NewEnrichProcessor() *EnrichProcessor { return r } + +// true + +type EnrichProcessorVariant interface { + EnrichProcessorCaster() *EnrichProcessor +} + +func (s *EnrichProcessor) EnrichProcessorCaster() *EnrichProcessor { + return s +} diff --git a/typedapi/types/ensemble.go b/typedapi/types/ensemble.go index e46f925269..a73d3ccfe7 100644 --- a/typedapi/types/ensemble.go +++ b/typedapi/types/ensemble.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Ensemble type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/types.ts#L93-L99 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/types.ts#L93-L99 type Ensemble struct { AggregateOutput *AggregateOutput `json:"aggregate_output,omitempty"` ClassificationLabels []string `json:"classification_labels,omitempty"` @@ -98,3 +98,13 @@ func NewEnsemble() *Ensemble { return r } + +// true + +type EnsembleVariant interface { + EnsembleCaster() *Ensemble +} + +func (s *Ensemble) EnsembleCaster() *Ensemble { + return s +} diff --git a/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go b/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go index 987974e0e7..cefc9e8f1b 100644 --- a/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go +++ b/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package accesstokengranttype package accesstokengranttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_token/types.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_token/types.ts#L23-L48 type AccessTokenGrantType struct { Name string } diff --git a/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go b/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go index 49138de3bb..bd7c5f2d12 100644 --- a/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go +++ b/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package acknowledgementoptions package acknowledgementoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Action.ts#L109-L113 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Action.ts#L103-L107 type AcknowledgementOptions struct { Name string } diff --git a/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go b/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go index d2d4a15bfd..6eebb6afe3 100644 --- a/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go +++ b/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package actionexecutionmode package actionexecutionmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Action.ts#L73-L94 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Action.ts#L67-L88 type ActionExecutionMode struct { Name string } diff --git a/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go b/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go index 83383a5c76..0b5a90cf86 100644 --- a/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go +++ b/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package actionstatusoptions package actionstatusoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Action.ts#L102-L107 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Action.ts#L96-L101 type ActionStatusOptions struct { Name string } diff --git a/typedapi/types/enums/actiontype/actiontype.go b/typedapi/types/enums/actiontype/actiontype.go index 44e0cecc16..8f85cfcac7 100644 --- a/typedapi/types/enums/actiontype/actiontype.go +++ b/typedapi/types/enums/actiontype/actiontype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package actiontype package actiontype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Action.ts#L64-L71 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Action.ts#L58-L65 type ActionType struct { Name string } diff --git a/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go b/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go index 2e759582a2..33ddccc877 100644 --- a/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go +++ b/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package allocationexplaindecision package allocationexplaindecision import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/types.ts#L32-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/types.ts#L33-L38 type AllocationExplainDecision struct { Name string } @@ -45,13 +45,13 @@ func (a AllocationExplainDecision) MarshalText() (text []byte, err error) { func (a *AllocationExplainDecision) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "NO": + case "no": *a = NO - case "YES": + case "yes": *a = YES - case "THROTTLE": + case "throttle": *a = THROTTLE - case "ALWAYS": + case "always": *a = ALWAYS default: *a = AllocationExplainDecision{string(text)} diff --git a/typedapi/types/enums/apikeygranttype/apikeygranttype.go b/typedapi/types/enums/apikeygranttype/apikeygranttype.go index 81296134c6..91e8f974a6 100644 --- a/typedapi/types/enums/apikeygranttype/apikeygranttype.go +++ b/typedapi/types/enums/apikeygranttype/apikeygranttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package apikeygranttype package apikeygranttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/grant_api_key/types.ts#L48-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/grant_api_key/types.ts#L47-L50 type ApiKeyGrantType struct { Name string } diff --git a/typedapi/types/enums/apikeytype/apikeytype.go b/typedapi/types/enums/apikeytype/apikeytype.go new file mode 100644 index 0000000000..2f76948f3d --- /dev/null +++ b/typedapi/types/enums/apikeytype/apikeytype.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package apikeytype +package apikeytype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/ApiKey.ts#L115-L118 +type ApiKeyType struct { + Name string +} + +var ( + Rest = ApiKeyType{"rest"} + + Crosscluster = ApiKeyType{"cross_cluster"} +) + +func (a ApiKeyType) MarshalText() (text []byte, err error) { + return []byte(a.String()), nil +} + +func (a *ApiKeyType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "rest": + *a = Rest + case "cross_cluster": + *a = Crosscluster + default: + *a = ApiKeyType{string(text)} + } + + return nil +} + +func (a ApiKeyType) String() string { + return a.Name +} diff --git a/typedapi/types/enums/appliesto/appliesto.go b/typedapi/types/enums/appliesto/appliesto.go index ef2071f37f..b0be2af1e9 100644 --- a/typedapi/types/enums/appliesto/appliesto.go +++ b/typedapi/types/enums/appliesto/appliesto.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package appliesto package appliesto import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Rule.ts#L67-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Rule.ts#L67-L72 type AppliesTo struct { Name string } diff --git a/typedapi/types/enums/boundaryscanner/boundaryscanner.go b/typedapi/types/enums/boundaryscanner/boundaryscanner.go index 9684edfc25..36e579805a 100644 --- a/typedapi/types/enums/boundaryscanner/boundaryscanner.go +++ b/typedapi/types/enums/boundaryscanner/boundaryscanner.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package boundaryscanner package boundaryscanner import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/highlighting.ts#L27-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/highlighting.ts#L26-L45 type BoundaryScanner struct { Name string } diff --git a/typedapi/types/enums/bytes/bytes.go b/typedapi/types/enums/bytes/bytes.go index b4c8b16ea1..ae83b13ff4 100644 --- a/typedapi/types/enums/bytes/bytes.go +++ b/typedapi/types/enums/bytes/bytes.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package bytes package bytes import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L169-L181 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L173-L185 type Bytes struct { Name string } diff --git a/typedapi/types/enums/calendarinterval/calendarinterval.go b/typedapi/types/enums/calendarinterval/calendarinterval.go index 53df56e7f1..60c58075d1 100644 --- a/typedapi/types/enums/calendarinterval/calendarinterval.go +++ b/typedapi/types/enums/calendarinterval/calendarinterval.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package calendarinterval package calendarinterval import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L251-L268 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L262-L279 type CalendarInterval struct { Name string } diff --git a/typedapi/types/enums/cardinalityexecutionmode/cardinalityexecutionmode.go b/typedapi/types/enums/cardinalityexecutionmode/cardinalityexecutionmode.go index 2c86be3147..74c9060e2d 100644 --- a/typedapi/types/enums/cardinalityexecutionmode/cardinalityexecutionmode.go +++ b/typedapi/types/enums/cardinalityexecutionmode/cardinalityexecutionmode.go @@ 
-16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package cardinalityexecutionmode package cardinalityexecutionmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L64-L85 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L64-L85 type CardinalityExecutionMode struct { Name string } diff --git a/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go b/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go index 9e0821de5c..a70cd38e53 100644 --- a/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go +++ b/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package catanomalydetectorcolumn package catanomalydetectorcolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/_types/CatBase.ts#L32-L401 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/_types/CatBase.ts#L32-L401 type CatAnomalyDetectorColumn struct { Name string } diff --git a/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go b/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go index 736933ca75..aab956ed15 100644 --- a/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go +++ b/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package catdatafeedcolumn package catdatafeedcolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/_types/CatBase.ts#L405-L471 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/_types/CatBase.ts#L405-L471 type CatDatafeedColumn struct { Name string } diff --git a/typedapi/types/enums/catdfacolumn/catdfacolumn.go b/typedapi/types/enums/catdfacolumn/catdfacolumn.go index 1097e00f52..7c75389906 100644 --- a/typedapi/types/enums/catdfacolumn/catdfacolumn.go +++ b/typedapi/types/enums/catdfacolumn/catdfacolumn.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package catdfacolumn package catdfacolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/_types/CatBase.ts#L472-L557 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/_types/CatBase.ts#L472-L557 type CatDfaColumn struct { Name string } diff --git a/typedapi/types/enums/categorizationstatus/categorizationstatus.go b/typedapi/types/enums/categorizationstatus/categorizationstatus.go index a80b0f2fcb..d763869c59 100644 --- a/typedapi/types/enums/categorizationstatus/categorizationstatus.go +++ b/typedapi/types/enums/categorizationstatus/categorizationstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package categorizationstatus package categorizationstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Model.ts#L83-L86 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Model.ts#L84-L87 type CategorizationStatus struct { Name string } diff --git a/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go b/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go index ab392bdbc6..ca0c9b4514 100644 --- a/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go +++ b/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package cattrainedmodelscolumn package cattrainedmodelscolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/_types/CatBase.ts#L561-L635 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/_types/CatBase.ts#L561-L635 type CatTrainedModelsColumn struct { Name string } diff --git a/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go b/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go index 3e98315153..02483ef390 100644 --- a/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go +++ b/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package cattransformcolumn package cattransformcolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/_types/CatBase.ts#L640-L844 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/_types/CatBase.ts#L640-L844 type CatTransformColumn struct { Name string } diff --git a/typedapi/types/enums/childscoremode/childscoremode.go b/typedapi/types/enums/childscoremode/childscoremode.go index ce849581f0..52acc44cea 100644 --- a/typedapi/types/enums/childscoremode/childscoremode.go +++ b/typedapi/types/enums/childscoremode/childscoremode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package childscoremode package childscoremode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/joining.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/joining.ts#L25-L39 type ChildScoreMode struct { Name string } diff --git a/typedapi/types/enums/chunkingmode/chunkingmode.go b/typedapi/types/enums/chunkingmode/chunkingmode.go index 87287ef46a..7330dcb228 100644 --- a/typedapi/types/enums/chunkingmode/chunkingmode.go +++ b/typedapi/types/enums/chunkingmode/chunkingmode.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package chunkingmode package chunkingmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Datafeed.ts#L232-L236 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Datafeed.ts#L245-L249 type ChunkingMode struct { Name string } diff --git a/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go b/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go index d9547c1d1b..690dc80578 100644 --- a/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go +++ b/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package clusterinfotarget package clusterinfotarget import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L381-L387 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L385-L391 type ClusterInfoTarget struct { Name string } diff --git a/typedapi/types/enums/clusterprivilege/clusterprivilege.go b/typedapi/types/enums/clusterprivilege/clusterprivilege.go index 7cd28074a2..789928afc0 100644 --- a/typedapi/types/enums/clusterprivilege/clusterprivilege.go +++ b/typedapi/types/enums/clusterprivilege/clusterprivilege.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package clusterprivilege package clusterprivilege import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/Privileges.ts#L42-L195 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L42-L199 type ClusterPrivilege struct { Name string } @@ -117,6 +117,8 @@ var ( Monitorsnapshot = ClusterPrivilege{"monitor_snapshot"} + Monitorstats = ClusterPrivilege{"monitor_stats"} + Monitortextstructure = ClusterPrivilege{"monitor_text_structure"} Monitortransform = ClusterPrivilege{"monitor_transform"} @@ -241,6 +243,8 @@ func (c *ClusterPrivilege) UnmarshalText(text []byte) error { *c = Monitorrollup case "monitor_snapshot": *c = Monitorsnapshot + case "monitor_stats": + *c = Monitorstats case "monitor_text_structure": *c = Monitortextstructure case "monitor_transform": diff --git a/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go b/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go index ce18ead4c2..1de98fd22a 100644 --- a/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go +++ b/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package clustersearchstatus package clustersearchstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L37-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L37-L43 type ClusterSearchStatus struct { Name string } diff --git a/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go b/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go index 7d1df5a360..f3dabd9b9b 100644 --- a/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go +++ b/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package combinedfieldsoperator package combinedfieldsoperator import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/abstractions.ts#L509-L512 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/abstractions.ts#L519-L522 type CombinedFieldsOperator struct { Name string } diff --git a/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go b/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go index 2b9823d622..747fc2c571 100644 --- a/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go +++ b/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package combinedfieldszeroterms package combinedfieldszeroterms import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/abstractions.ts#L514-L523 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/abstractions.ts#L524-L533 type CombinedFieldsZeroTerms struct { Name string } diff --git a/typedapi/types/enums/conditionop/conditionop.go b/typedapi/types/enums/conditionop/conditionop.go index f6f569281e..8dea407dbc 100644 --- a/typedapi/types/enums/conditionop/conditionop.go +++ b/typedapi/types/enums/conditionop/conditionop.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package conditionop package conditionop import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Conditions.ts#L41-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Conditions.ts#L41-L48 type ConditionOp struct { Name string } diff --git a/typedapi/types/enums/conditionoperator/conditionoperator.go b/typedapi/types/enums/conditionoperator/conditionoperator.go index ddbb089104..325d586bb6 100644 --- a/typedapi/types/enums/conditionoperator/conditionoperator.go +++ b/typedapi/types/enums/conditionoperator/conditionoperator.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package conditionoperator package conditionoperator import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Rule.ts#L74-L79 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Rule.ts#L74-L79 type ConditionOperator struct { Name string } diff --git a/typedapi/types/enums/conditiontype/conditiontype.go b/typedapi/types/enums/conditiontype/conditiontype.go index 31d6537e50..fdbd188c5c 100644 --- a/typedapi/types/enums/conditiontype/conditiontype.go +++ b/typedapi/types/enums/conditiontype/conditiontype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package conditiontype package conditiontype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Conditions.ts#L64-L70 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Conditions.ts#L64-L70 type ConditionType struct { Name string } diff --git a/typedapi/types/enums/conflicts/conflicts.go b/typedapi/types/enums/conflicts/conflicts.go index 6d57ffef00..fe076365c4 100644 --- a/typedapi/types/enums/conflicts/conflicts.go +++ b/typedapi/types/enums/conflicts/conflicts.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package conflicts package conflicts import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L183-L192 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L187-L196 type Conflicts struct { Name string } diff --git a/typedapi/types/enums/connectionscheme/connectionscheme.go b/typedapi/types/enums/connectionscheme/connectionscheme.go index a1972b6cb4..8f126c6d3d 100644 --- a/typedapi/types/enums/connectionscheme/connectionscheme.go +++ b/typedapi/types/enums/connectionscheme/connectionscheme.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package connectionscheme package connectionscheme import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L39-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L39-L42 type ConnectionScheme struct { Name string } diff --git a/typedapi/types/enums/connectorfieldtype/connectorfieldtype.go b/typedapi/types/enums/connectorfieldtype/connectorfieldtype.go index e247e211d5..d96ca13bb0 100644 --- a/typedapi/types/enums/connectorfieldtype/connectorfieldtype.go +++ b/typedapi/types/enums/connectorfieldtype/connectorfieldtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package connectorfieldtype package connectorfieldtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L43-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L43-L48 type ConnectorFieldType struct { Name string } diff --git a/typedapi/types/enums/connectorstatus/connectorstatus.go b/typedapi/types/enums/connectorstatus/connectorstatus.go index 7df0e658ee..9d53762485 100644 --- a/typedapi/types/enums/connectorstatus/connectorstatus.go +++ b/typedapi/types/enums/connectorstatus/connectorstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package connectorstatus package connectorstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L130-L136 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L130-L136 type ConnectorStatus struct { Name string } diff --git a/typedapi/types/enums/converttype/converttype.go b/typedapi/types/enums/converttype/converttype.go index 83578a07b2..0d83984e7e 100644 --- a/typedapi/types/enums/converttype/converttype.go +++ b/typedapi/types/enums/converttype/converttype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package converttype package converttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L435-L443 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L661-L670 type ConvertType struct { Name string } @@ -33,14 +33,16 @@ var ( Long = ConvertType{"long"} - Float = ConvertType{"float"} - Double = ConvertType{"double"} - String = ConvertType{"string"} + Float = ConvertType{"float"} Boolean = ConvertType{"boolean"} + Ip = ConvertType{"ip"} + + String = ConvertType{"string"} + Auto = ConvertType{"auto"} ) @@ -55,14 +57,16 @@ func (c *ConvertType) UnmarshalText(text []byte) error { *c = Integer case "long": *c = Long - case "float": - *c = Float case "double": *c = Double - case "string": - *c = String + case "float": + *c = Float case "boolean": *c = Boolean + case "ip": + *c = Ip + case "string": + *c = String case "auto": *c = Auto default: diff --git a/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go b/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go index 527dce8c13..75fcb41240 100644 --- a/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go +++ b/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package dataattachmentformat package dataattachmentformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L187-L190 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L187-L190 type DataAttachmentFormat struct { Name string } diff --git a/typedapi/types/enums/datafeedstate/datafeedstate.go b/typedapi/types/enums/datafeedstate/datafeedstate.go index f4aae943e3..f40f23bd67 100644 --- a/typedapi/types/enums/datafeedstate/datafeedstate.go +++ b/typedapi/types/enums/datafeedstate/datafeedstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package datafeedstate package datafeedstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Datafeed.ts#L132-L137 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Datafeed.ts#L136-L141 type DatafeedState struct { Name string } diff --git a/typedapi/types/enums/dataframestate/dataframestate.go b/typedapi/types/enums/dataframestate/dataframestate.go index dc3fec20a7..d2414b687f 100644 --- a/typedapi/types/enums/dataframestate/dataframestate.go +++ b/typedapi/types/enums/dataframestate/dataframestate.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package dataframestate package dataframestate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Dataframe.ts#L20-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Dataframe.ts#L20-L26 type DataframeState struct { Name string } diff --git a/typedapi/types/enums/day/day.go b/typedapi/types/enums/day/day.go index ef45a6c358..9b73a4235b 100644 --- a/typedapi/types/enums/day/day.go +++ b/typedapi/types/enums/day/day.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package day package day import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Schedule.ts#L37-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Schedule.ts#L37-L45 type Day struct { Name string } diff --git a/typedapi/types/enums/decision/decision.go b/typedapi/types/enums/decision/decision.go index 52425c32ad..10606a846d 100644 --- a/typedapi/types/enums/decision/decision.go +++ b/typedapi/types/enums/decision/decision.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package decision package decision import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/types.ts#L86-L95 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/types.ts#L92-L101 type Decision struct { Name string } diff --git a/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go b/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go index 0787b07dff..3acbe1744d 100644 --- a/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go +++ b/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package delimitedpayloadencoding package delimitedpayloadencoding import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L62-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L61-L65 type DelimitedPayloadEncoding struct { Name string } diff --git a/typedapi/types/enums/deploymentstate/deploymentstate.go b/typedapi/types/enums/densevectorelementtype/densevectorelementtype.go similarity index 56% rename from typedapi/types/enums/deploymentstate/deploymentstate.go rename to typedapi/types/enums/densevectorelementtype/densevectorelementtype.go index acca500212..b249295bac 100644 --- a/typedapi/types/enums/deploymentstate/deploymentstate.go +++ b/typedapi/types/enums/densevectorelementtype/densevectorelementtype.go @@ -16,46 +16,46 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Package deploymentstate -package deploymentstate +// Package densevectorelementtype +package densevectorelementtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L274-L287 -type DeploymentState struct { +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/DenseVectorProperty.ts#L64-L80 +type DenseVectorElementType struct { Name string } var ( - Started = DeploymentState{"started"} + Bit = DenseVectorElementType{"bit"} - Starting = DeploymentState{"starting"} + Byte = DenseVectorElementType{"byte"} - Stopping = DeploymentState{"stopping"} + Float = DenseVectorElementType{"float"} ) -func (d DeploymentState) MarshalText() (text []byte, err error) { +func (d DenseVectorElementType) MarshalText() (text []byte, err error) { return []byte(d.String()), nil } -func (d *DeploymentState) UnmarshalText(text []byte) error { +func (d *DenseVectorElementType) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "started": - *d = Started - case "starting": - *d = Starting - case "stopping": - *d = Stopping + case "bit": + *d = Bit + case "byte": + *d = Byte + case "float": + *d = Float default: - *d = DeploymentState{string(text)} + *d = DenseVectorElementType{string(text)} } return nil } -func (d DeploymentState) String() string { +func (d DenseVectorElementType) String() string { return d.Name } diff --git a/typedapi/types/enums/densevectorindexoptionstype/densevectorindexoptionstype.go b/typedapi/types/enums/densevectorindexoptionstype/densevectorindexoptionstype.go new file mode 100644 index 0000000000..a476b17bbd 
--- /dev/null +++ b/typedapi/types/enums/densevectorindexoptionstype/densevectorindexoptionstype.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package densevectorindexoptionstype +package densevectorindexoptionstype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/DenseVectorProperty.ts#L164-L197 +type DenseVectorIndexOptionsType struct { + Name string +} + +var ( + Flat = DenseVectorIndexOptionsType{"flat"} + + Hnsw = DenseVectorIndexOptionsType{"hnsw"} + + Int4flat = DenseVectorIndexOptionsType{"int4_flat"} + + Int4hnsw = DenseVectorIndexOptionsType{"int4_hnsw"} + + Int8flat = DenseVectorIndexOptionsType{"int8_flat"} + + Int8hnsw = DenseVectorIndexOptionsType{"int8_hnsw"} +) + +func (d DenseVectorIndexOptionsType) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DenseVectorIndexOptionsType) UnmarshalText(text []byte) error { + switch 
strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "flat": + *d = Flat + case "hnsw": + *d = Hnsw + case "int4_flat": + *d = Int4flat + case "int4_hnsw": + *d = Int4hnsw + case "int8_flat": + *d = Int8flat + case "int8_hnsw": + *d = Int8hnsw + default: + *d = DenseVectorIndexOptionsType{string(text)} + } + + return nil +} + +func (d DenseVectorIndexOptionsType) String() string { + return d.Name +} diff --git a/typedapi/types/enums/densevectorsimilarity/densevectorsimilarity.go b/typedapi/types/enums/densevectorsimilarity/densevectorsimilarity.go new file mode 100644 index 0000000000..9e555d8965 --- /dev/null +++ b/typedapi/types/enums/densevectorsimilarity/densevectorsimilarity.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package densevectorsimilarity +package densevectorsimilarity + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/DenseVectorProperty.ts#L82-L127 +type DenseVectorSimilarity struct { + Name string +} + +var ( + Cosine = DenseVectorSimilarity{"cosine"} + + Dotproduct = DenseVectorSimilarity{"dot_product"} + + L2norm = DenseVectorSimilarity{"l2_norm"} + + Maxinnerproduct = DenseVectorSimilarity{"max_inner_product"} +) + +func (d DenseVectorSimilarity) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DenseVectorSimilarity) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "cosine": + *d = Cosine + case "dot_product": + *d = Dotproduct + case "l2_norm": + *d = L2norm + case "max_inner_product": + *d = Maxinnerproduct + default: + *d = DenseVectorSimilarity{string(text)} + } + + return nil +} + +func (d DenseVectorSimilarity) String() string { + return d.Name +} diff --git a/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go b/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go index 6743608f53..7f236aa058 100644 --- a/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go +++ b/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package deploymentallocationstate package deploymentallocationstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L289-L302 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L330-L343 type DeploymentAllocationState struct { Name string } diff --git a/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go b/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go index a7b343c844..89b551b889 100644 --- a/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go +++ b/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package deploymentassignmentstate package deploymentassignmentstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L304-L309 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L345-L362 type DeploymentAssignmentState struct { Name string } var ( - Starting = DeploymentAssignmentState{"starting"} - Started = DeploymentAssignmentState{"started"} + Starting = DeploymentAssignmentState{"starting"} + Stopping = DeploymentAssignmentState{"stopping"} Failed = DeploymentAssignmentState{"failed"} @@ -45,10 +45,10 @@ func (d DeploymentAssignmentState) MarshalText() (text []byte, err error) { func (d *DeploymentAssignmentState) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "starting": - *d = Starting case "started": *d = Started + case "starting": + *d = Starting case "stopping": *d = Stopping case "failed": diff --git a/typedapi/types/enums/deprecationlevel/deprecationlevel.go b/typedapi/types/enums/deprecationlevel/deprecationlevel.go index 38630ba1e0..8207ce4590 100644 --- a/typedapi/types/enums/deprecationlevel/deprecationlevel.go +++ b/typedapi/types/enums/deprecationlevel/deprecationlevel.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package deprecationlevel package deprecationlevel import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/migration/deprecations/types.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/migration/deprecations/types.ts#L23-L30 type DeprecationLevel struct { Name string } diff --git a/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go b/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go index 532de93a25..dc569b4185 100644 --- a/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go +++ b/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package dfiindependencemeasure package dfiindependencemeasure import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Similarity.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Similarity.ts#L20-L24 type DFIIndependenceMeasure struct { Name string } diff --git a/typedapi/types/enums/dfraftereffect/dfraftereffect.go b/typedapi/types/enums/dfraftereffect/dfraftereffect.go index b1353ebd7f..700fedbd09 100644 --- a/typedapi/types/enums/dfraftereffect/dfraftereffect.go +++ b/typedapi/types/enums/dfraftereffect/dfraftereffect.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package dfraftereffect package dfraftereffect import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Similarity.ts#L26-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Similarity.ts#L26-L30 type DFRAfterEffect struct { Name string } diff --git a/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go b/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go index bbf2f8cf0b..ce45feb3bb 100644 --- a/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go +++ b/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package dfrbasicmodel package dfrbasicmodel import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Similarity.ts#L32-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Similarity.ts#L32-L40 type DFRBasicModel struct { Name string } diff --git a/typedapi/types/enums/displaytype/displaytype.go b/typedapi/types/enums/displaytype/displaytype.go index f0016dba27..6a880aa3c1 100644 --- a/typedapi/types/enums/displaytype/displaytype.go +++ b/typedapi/types/enums/displaytype/displaytype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package displaytype package displaytype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L35-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L35-L41 type DisplayType struct { Name string } diff --git a/typedapi/types/enums/distanceunit/distanceunit.go b/typedapi/types/enums/distanceunit/distanceunit.go index 3adf0f6e5e..4b94b71a0e 100644 --- a/typedapi/types/enums/distanceunit/distanceunit.go +++ b/typedapi/types/enums/distanceunit/distanceunit.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package distanceunit package distanceunit import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Geo.ts#L30-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Geo.ts#L30-L40 type DistanceUnit struct { Name string } diff --git a/typedapi/types/enums/dynamicmapping/dynamicmapping.go b/typedapi/types/enums/dynamicmapping/dynamicmapping.go index 3bfcf59438..9661daf0ae 100644 --- a/typedapi/types/enums/dynamicmapping/dynamicmapping.go +++ b/typedapi/types/enums/dynamicmapping/dynamicmapping.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package dynamicmapping package dynamicmapping import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/dynamic-template.ts#L49-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/dynamic-template.ts#L50-L59 type DynamicMapping struct { Name string } diff --git a/typedapi/types/enums/ecscompatibilitytype/ecscompatibilitytype.go b/typedapi/types/enums/ecscompatibilitytype/ecscompatibilitytype.go new file mode 100644 index 0000000000..35ee614195 --- /dev/null +++ b/typedapi/types/enums/ecscompatibilitytype/ecscompatibilitytype.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package ecscompatibilitytype +package ecscompatibilitytype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/text_structure/_types/Structure.ts#L40-L43 +type EcsCompatibilityType struct { + Name string +} + +var ( + Disabled = EcsCompatibilityType{"disabled"} + + V1 = EcsCompatibilityType{"v1"} +) + +func (e EcsCompatibilityType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +func (e *EcsCompatibilityType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "disabled": + *e = Disabled + case "v1": + *e = V1 + default: + *e = EcsCompatibilityType{string(text)} + } + + return nil +} + +func (e EcsCompatibilityType) String() string { + return e.Name +} diff --git a/typedapi/types/enums/edgengramside/edgengramside.go b/typedapi/types/enums/edgengramside/edgengramside.go index b17841f856..0053d2b8ff 100644 --- a/typedapi/types/enums/edgengramside/edgengramside.go +++ b/typedapi/types/enums/edgengramside/edgengramside.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package edgengramside package edgengramside import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L74-L77 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L73-L76 type EdgeNGramSide struct { Name string } diff --git a/typedapi/types/enums/emailpriority/emailpriority.go b/typedapi/types/enums/emailpriority/emailpriority.go index db919669a8..9d4c9a163b 100644 --- a/typedapi/types/enums/emailpriority/emailpriority.go +++ b/typedapi/types/enums/emailpriority/emailpriority.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package emailpriority package emailpriority import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L197-L203 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L197-L203 type EmailPriority struct { Name string } diff --git a/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go b/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go index 89716402f6..a0239ff23a 100644 --- a/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go +++ b/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package enrichpolicyphase package enrichpolicyphase import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/execute_policy/types.ts#L24-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/execute_policy/types.ts#L24-L29 type EnrichPolicyPhase struct { Name string } @@ -45,13 +45,13 @@ func (e EnrichPolicyPhase) MarshalText() (text []byte, err error) { func (e *EnrichPolicyPhase) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "SCHEDULED": + case "scheduled": *e = SCHEDULED - case "RUNNING": + case "running": *e = RUNNING - case "COMPLETE": + case "complete": *e = COMPLETE - case "FAILED": + case "failed": *e = FAILED default: *e = EnrichPolicyPhase{string(text)} diff --git a/typedapi/types/enums/esqlformat/esqlformat.go b/typedapi/types/enums/esqlformat/esqlformat.go new file mode 100644 index 0000000000..aa749b5283 --- /dev/null +++ b/typedapi/types/enums/esqlformat/esqlformat.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package esqlformat +package esqlformat + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/esql/query/QueryParameters.ts#L20-L29 +type EsqlFormat struct { + Name string +} + +var ( + Csv = EsqlFormat{"csv"} + + Json = EsqlFormat{"json"} + + Tsv = EsqlFormat{"tsv"} + + Txt = EsqlFormat{"txt"} + + Yaml = EsqlFormat{"yaml"} + + Cbor = EsqlFormat{"cbor"} + + Smile = EsqlFormat{"smile"} + + Arrow = EsqlFormat{"arrow"} +) + +func (e EsqlFormat) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +func (e *EsqlFormat) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "csv": + *e = Csv + case "json": + *e = Json + case "tsv": + *e = Tsv + case "txt": + *e = Txt + case "yaml": + *e = Yaml + case "cbor": + *e = Cbor + case "smile": + *e = Smile + case "arrow": + *e = Arrow + default: + *e = EsqlFormat{string(text)} + } + + return nil +} + +func (e EsqlFormat) String() string { + return e.Name +} diff --git a/typedapi/types/enums/eventtype/eventtype.go b/typedapi/types/enums/eventtype/eventtype.go new file mode 100644 index 0000000000..21f58a8088 --- /dev/null +++ b/typedapi/types/enums/eventtype/eventtype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package eventtype +package eventtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/_types/AnalyticsEvent.ts#L22-L26 +type EventType struct { + Name string +} + +var ( + PageView = EventType{"page_view"} + + Search = EventType{"search"} + + SearchClick = EventType{"search_click"} +) + +func (e EventType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +func (e *EventType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "page_view": + *e = PageView + case "search": + *e = Search + case "search_click": + *e = SearchClick + default: + *e = EventType{string(text)} + } + + return nil +} + +func (e EventType) String() string { + return e.Name +} diff --git a/typedapi/types/enums/excludefrequent/excludefrequent.go b/typedapi/types/enums/excludefrequent/excludefrequent.go index 40ed2207cb..65d7ea0aa4 100644 --- 
a/typedapi/types/enums/excludefrequent/excludefrequent.go +++ b/typedapi/types/enums/excludefrequent/excludefrequent.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package excludefrequent package excludefrequent import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Detector.ts#L127-L132 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Detector.ts#L145-L150 type ExcludeFrequent struct { Name string } diff --git a/typedapi/types/enums/executionphase/executionphase.go b/typedapi/types/enums/executionphase/executionphase.go index fded6bd51c..17cfd87c4c 100644 --- a/typedapi/types/enums/executionphase/executionphase.go +++ b/typedapi/types/enums/executionphase/executionphase.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package executionphase package executionphase import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Execution.ts#L49-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Execution.ts#L49-L58 type ExecutionPhase struct { Name string } diff --git a/typedapi/types/enums/executionstatus/executionstatus.go b/typedapi/types/enums/executionstatus/executionstatus.go index d449cb3cdb..d27acc65c7 100644 --- a/typedapi/types/enums/executionstatus/executionstatus.go +++ b/typedapi/types/enums/executionstatus/executionstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package executionstatus package executionstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Execution.ts#L38-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Execution.ts#L38-L47 type ExecutionStatus struct { Name string } diff --git a/typedapi/types/enums/expandwildcard/expandwildcard.go b/typedapi/types/enums/expandwildcard/expandwildcard.go index 7e17963878..01f6818e0d 100644 --- a/typedapi/types/enums/expandwildcard/expandwildcard.go +++ b/typedapi/types/enums/expandwildcard/expandwildcard.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package expandwildcard package expandwildcard import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L201-L215 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L205-L219 type ExpandWildcard struct { Name string } diff --git a/typedapi/types/enums/failurestorestatus/failurestorestatus.go b/typedapi/types/enums/failurestorestatus/failurestorestatus.go new file mode 100644 index 0000000000..18973079a8 --- /dev/null +++ b/typedapi/types/enums/failurestorestatus/failurestorestatus.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package failurestorestatus +package failurestorestatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/bulk/types.ts#L86-L91 +type FailureStoreStatus struct { + Name string +} + +var ( + Notapplicableorunknown = FailureStoreStatus{"not_applicable_or_unknown"} + + Used = FailureStoreStatus{"used"} + + Notenabled = FailureStoreStatus{"not_enabled"} + + Failed = FailureStoreStatus{"failed"} +) + +func (f FailureStoreStatus) MarshalText() (text []byte, err error) { + return []byte(f.String()), nil +} + +func (f *FailureStoreStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "not_applicable_or_unknown": + *f = Notapplicableorunknown + case "used": + *f = Used + case "not_enabled": + *f = Notenabled + case "failed": + *f = Failed + default: + *f = FailureStoreStatus{string(text)} + } + + return nil +} + +func (f FailureStoreStatus) String() string { + return f.Name +} diff --git a/typedapi/types/enums/feature/feature.go b/typedapi/types/enums/feature/feature.go index df58249335..2c7b6a5be9 100644 --- a/typedapi/types/enums/feature/feature.go +++ b/typedapi/types/enums/feature/feature.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package feature package feature import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get/IndicesGetRequest.ts#L91-L95 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get/IndicesGetRequest.ts#L98-L102 type Feature struct { Name string } diff --git a/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go b/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go index d6705fb767..c3b3006775 100644 --- a/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go +++ b/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package fieldsortnumerictype package fieldsortnumerictype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/sort.ts#L37-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/sort.ts#L36-L41 type FieldSortNumericType struct { Name string } diff --git a/typedapi/types/enums/fieldtype/fieldtype.go b/typedapi/types/enums/fieldtype/fieldtype.go index 87f2b48f6e..685321d2ba 100644 --- a/typedapi/types/enums/fieldtype/fieldtype.go +++ b/typedapi/types/enums/fieldtype/fieldtype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package fieldtype package fieldtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/Property.ts#L166-L213 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/Property.ts#L191-L240 type FieldType struct { Name string } @@ -57,6 +57,8 @@ var ( Object = FieldType{"object"} + Passthrough = FieldType{"passthrough"} + Version = FieldType{"version"} Murmur3 = FieldType{"murmur3"} @@ -109,6 +111,8 @@ var ( Constantkeyword = FieldType{"constant_keyword"} + Countedkeyword = FieldType{"counted_keyword"} + Aggregatemetricdouble = FieldType{"aggregate_metric_double"} Densevector = FieldType{"dense_vector"} @@ -157,6 +161,8 @@ func (f *FieldType) UnmarshalText(text []byte) error { *f = Nested case "object": *f = Object + case "passthrough": + *f = Passthrough case "version": *f = Version case "murmur3": @@ -209,6 +215,8 @@ func (f *FieldType) UnmarshalText(text []byte) error { *f = Histogram case "constant_keyword": *f = Constantkeyword + case "counted_keyword": + *f = Countedkeyword case "aggregate_metric_double": *f = Aggregatemetricdouble case "dense_vector": diff --git a/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go b/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go index c9e587e100..97a7cda3dc 100644 --- a/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go +++ b/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package fieldvaluefactormodifier package fieldvaluefactormodifier import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L310-L353 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L323-L366 type FieldValueFactorModifier struct { Name string } diff --git a/typedapi/types/enums/filteringpolicy/filteringpolicy.go b/typedapi/types/enums/filteringpolicy/filteringpolicy.go index 6dbff4f4dc..38fb17aa7f 100644 --- a/typedapi/types/enums/filteringpolicy/filteringpolicy.go +++ b/typedapi/types/enums/filteringpolicy/filteringpolicy.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package filteringpolicy package filteringpolicy import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L155-L158 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L155-L158 type FilteringPolicy struct { Name string } diff --git a/typedapi/types/enums/filteringrulerule/filteringrulerule.go b/typedapi/types/enums/filteringrulerule/filteringrulerule.go index 2bd6fa2ab6..4ebacb77dc 100644 --- a/typedapi/types/enums/filteringrulerule/filteringrulerule.go +++ b/typedapi/types/enums/filteringrulerule/filteringrulerule.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package filteringrulerule package filteringrulerule import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L160-L168 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L160-L168 type FilteringRuleRule struct { Name string } diff --git a/typedapi/types/enums/filteringvalidationstate/filteringvalidationstate.go b/typedapi/types/enums/filteringvalidationstate/filteringvalidationstate.go index 99a53eadf9..14b337caaf 100644 --- a/typedapi/types/enums/filteringvalidationstate/filteringvalidationstate.go +++ b/typedapi/types/enums/filteringvalidationstate/filteringvalidationstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package filteringvalidationstate package filteringvalidationstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L186-L190 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L186-L190 type FilteringValidationState struct { Name string } diff --git a/typedapi/types/enums/filtertype/filtertype.go b/typedapi/types/enums/filtertype/filtertype.go index e4fe783a38..e770c9aefc 100644 --- a/typedapi/types/enums/filtertype/filtertype.go +++ b/typedapi/types/enums/filtertype/filtertype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package filtertype package filtertype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Filter.ts#L43-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Filter.ts#L43-L46 type FilterType struct { Name string } diff --git a/typedapi/types/enums/fingerprintdigest/fingerprintdigest.go b/typedapi/types/enums/fingerprintdigest/fingerprintdigest.go new file mode 100644 index 0000000000..2cf2ff410b --- /dev/null +++ b/typedapi/types/enums/fingerprintdigest/fingerprintdigest.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package fingerprintdigest +package fingerprintdigest + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L896-L902 +type FingerprintDigest struct { + Name string +} + +var ( + Md5 = FingerprintDigest{"MD5"} + + Sha1 = FingerprintDigest{"SHA-1"} + + Sha256 = FingerprintDigest{"SHA-256"} + + Sha512 = FingerprintDigest{"SHA-512"} + + MurmurHash3 = FingerprintDigest{"MurmurHash3"} +) + +func (f FingerprintDigest) MarshalText() (text []byte, err error) { + return []byte(f.String()), nil +} + +func (f *FingerprintDigest) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "md5": + *f = Md5 + case "sha-1": + *f = Sha1 + case "sha-256": + *f = Sha256 + case "sha-512": + *f = Sha512 + case "murmurhash3": + *f = MurmurHash3 + default: + *f = FingerprintDigest{string(text)} + } + + return nil +} + +func (f FingerprintDigest) String() string { + return f.Name +} diff --git 
a/typedapi/types/enums/followerindexstatus/followerindexstatus.go b/typedapi/types/enums/followerindexstatus/followerindexstatus.go index 90b9d9a5ea..4c5b29397b 100644 --- a/typedapi/types/enums/followerindexstatus/followerindexstatus.go +++ b/typedapi/types/enums/followerindexstatus/followerindexstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package followerindexstatus package followerindexstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/follow_info/types.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/follow_info/types.ts#L37-L40 type FollowerIndexStatus struct { Name string } diff --git a/typedapi/types/enums/formattype/formattype.go b/typedapi/types/enums/formattype/formattype.go new file mode 100644 index 0000000000..b29699aa61 --- /dev/null +++ b/typedapi/types/enums/formattype/formattype.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package formattype +package formattype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/text_structure/_types/Structure.ts#L45-L50 +type FormatType struct { + Name string +} + +var ( + Delimited = FormatType{"delimited"} + + Ndjson = FormatType{"ndjson"} + + Semistructuredtext = FormatType{"semi_structured_text"} + + Xml = FormatType{"xml"} +) + +func (f FormatType) MarshalText() (text []byte, err error) { + return []byte(f.String()), nil +} + +func (f *FormatType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "delimited": + *f = Delimited + case "ndjson": + *f = Ndjson + case "semi_structured_text": + *f = Semistructuredtext + case "xml": + *f = Xml + default: + *f = FormatType{string(text)} + } + + return nil +} + +func (f FormatType) String() string { + return f.Name +} diff --git a/typedapi/types/enums/functionboostmode/functionboostmode.go b/typedapi/types/enums/functionboostmode/functionboostmode.go index b159b01208..ea7233634f 100644 --- a/typedapi/types/enums/functionboostmode/functionboostmode.go +++ b/typedapi/types/enums/functionboostmode/functionboostmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package functionboostmode package functionboostmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L282-L308 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L295-L321 type FunctionBoostMode struct { Name string } diff --git a/typedapi/types/enums/functionscoremode/functionscoremode.go b/typedapi/types/enums/functionscoremode/functionscoremode.go index 650484638f..8a043ee380 100644 --- a/typedapi/types/enums/functionscoremode/functionscoremode.go +++ b/typedapi/types/enums/functionscoremode/functionscoremode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package functionscoremode package functionscoremode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L255-L280 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L268-L293 type FunctionScoreMode struct { Name string } diff --git a/typedapi/types/enums/gappolicy/gappolicy.go b/typedapi/types/enums/gappolicy/gappolicy.go index 89fa23205e..74c7da5fa1 100644 --- a/typedapi/types/enums/gappolicy/gappolicy.go +++ b/typedapi/types/enums/gappolicy/gappolicy.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package gappolicy package gappolicy import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L61-L76 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L61-L76 type GapPolicy struct { Name string } diff --git a/typedapi/types/enums/geodistancetype/geodistancetype.go b/typedapi/types/enums/geodistancetype/geodistancetype.go index e6e6104931..2df9a90102 100644 --- a/typedapi/types/enums/geodistancetype/geodistancetype.go +++ b/typedapi/types/enums/geodistancetype/geodistancetype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package geodistancetype package geodistancetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Geo.ts#L42-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Geo.ts#L42-L51 type GeoDistanceType struct { Name string } diff --git a/typedapi/types/enums/geoexecution/geoexecution.go b/typedapi/types/enums/geoexecution/geoexecution.go index 65bb2709b1..57f5ee37b7 100644 --- a/typedapi/types/enums/geoexecution/geoexecution.go +++ b/typedapi/types/enums/geoexecution/geoexecution.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package geoexecution package geoexecution import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/geo.ts#L55-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/geo.ts#L59-L62 type GeoExecution struct { Name string } diff --git a/typedapi/types/enums/geogridtargetformat/geogridtargetformat.go b/typedapi/types/enums/geogridtargetformat/geogridtargetformat.go new file mode 100644 index 0000000000..ef8b4e2ec0 --- /dev/null +++ b/typedapi/types/enums/geogridtargetformat/geogridtargetformat.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package geogridtargetformat +package geogridtargetformat + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L437-L440 +type GeoGridTargetFormat struct { + Name string +} + +var ( + Geojson = GeoGridTargetFormat{"geojson"} + + Wkt = GeoGridTargetFormat{"wkt"} +) + +func (g GeoGridTargetFormat) MarshalText() (text []byte, err error) { + return []byte(g.String()), nil +} + +func (g *GeoGridTargetFormat) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "geojson": + *g = Geojson + case "wkt": + *g = Wkt + default: + *g = GeoGridTargetFormat{string(text)} + } + + return nil +} + +func (g GeoGridTargetFormat) String() string { + return g.Name +} diff --git a/typedapi/types/enums/geogridtiletype/geogridtiletype.go b/typedapi/types/enums/geogridtiletype/geogridtiletype.go new file mode 100644 index 0000000000..127bf42fbc --- /dev/null +++ b/typedapi/types/enums/geogridtiletype/geogridtiletype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package geogridtiletype +package geogridtiletype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L431-L435 +type GeoGridTileType struct { + Name string +} + +var ( + Geotile = GeoGridTileType{"geotile"} + + Geohex = GeoGridTileType{"geohex"} + + Geohash = GeoGridTileType{"geohash"} +) + +func (g GeoGridTileType) MarshalText() (text []byte, err error) { + return []byte(g.String()), nil +} + +func (g *GeoGridTileType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "geotile": + *g = Geotile + case "geohex": + *g = Geohex + case "geohash": + *g = Geohash + default: + *g = GeoGridTileType{string(text)} + } + + return nil +} + +func (g GeoGridTileType) String() string { + return g.Name +} diff --git a/typedapi/types/enums/geoorientation/geoorientation.go b/typedapi/types/enums/geoorientation/geoorientation.go index 477852d6a6..26bc16babf 100644 --- a/typedapi/types/enums/geoorientation/geoorientation.go +++ b/typedapi/types/enums/geoorientation/geoorientation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package geoorientation package geoorientation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/geo.ts#L34-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/geo.ts#L34-L39 type GeoOrientation struct { Name string } diff --git a/typedapi/types/enums/geoshaperelation/geoshaperelation.go b/typedapi/types/enums/geoshaperelation/geoshaperelation.go index 1a3f9cc49c..543d6ce971 100644 --- a/typedapi/types/enums/geoshaperelation/geoshaperelation.go +++ b/typedapi/types/enums/geoshaperelation/geoshaperelation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package geoshaperelation package geoshaperelation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Geo.ts#L64-L82 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Geo.ts#L64-L82 type GeoShapeRelation struct { Name string } diff --git a/typedapi/types/enums/geostrategy/geostrategy.go b/typedapi/types/enums/geostrategy/geostrategy.go index 778d0001dd..2977ff816f 100644 --- a/typedapi/types/enums/geostrategy/geostrategy.go +++ b/typedapi/types/enums/geostrategy/geostrategy.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package geostrategy package geostrategy import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/geo.ts#L56-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/geo.ts#L56-L59 type GeoStrategy struct { Name string } diff --git a/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go b/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go index f9a3bf711a..12e70bbcf5 100644 --- a/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go +++ b/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package geovalidationmethod package geovalidationmethod import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/geo.ts#L159-L169 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/geo.ts#L173-L183 type GeoValidationMethod struct { Name string } diff --git a/typedapi/types/enums/granttype/granttype.go b/typedapi/types/enums/granttype/granttype.go index ac168ca265..ee57de78c1 100644 --- a/typedapi/types/enums/granttype/granttype.go +++ b/typedapi/types/enums/granttype/granttype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package granttype package granttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/GrantType.ts#L20-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/GrantType.ts#L20-L30 type GrantType struct { Name string } diff --git a/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go b/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go index 412eea7626..51f807bae7 100644 --- a/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go +++ b/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package gridaggregationtype package gridaggregationtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search_mvt/_types/GridType.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search_mvt/_types/GridType.ts#L30-L33 type GridAggregationType struct { Name string } diff --git a/typedapi/types/enums/gridtype/gridtype.go b/typedapi/types/enums/gridtype/gridtype.go index c998eeb1fb..de43cbaee9 100644 --- a/typedapi/types/enums/gridtype/gridtype.go +++ b/typedapi/types/enums/gridtype/gridtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package gridtype package gridtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search_mvt/_types/GridType.ts#L20-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search_mvt/_types/GridType.ts#L20-L28 type GridType struct { Name string } diff --git a/typedapi/types/enums/groupby/groupby.go b/typedapi/types/enums/groupby/groupby.go index 16d2b33b0c..f694d4e89e 100644 --- a/typedapi/types/enums/groupby/groupby.go +++ b/typedapi/types/enums/groupby/groupby.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package groupby package groupby import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/tasks/_types/GroupBy.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/tasks/_types/GroupBy.ts#L20-L27 type GroupBy struct { Name string } diff --git a/typedapi/types/enums/healthstatus/healthstatus.go b/typedapi/types/enums/healthstatus/healthstatus.go index eff5e733ff..7586e3d9f7 100644 --- a/typedapi/types/enums/healthstatus/healthstatus.go +++ b/typedapi/types/enums/healthstatus/healthstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package healthstatus package healthstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L219-L239 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L223-L243 type HealthStatus struct { Name string } diff --git a/typedapi/types/enums/highlighterencoder/highlighterencoder.go b/typedapi/types/enums/highlighterencoder/highlighterencoder.go index 772d1650c4..8ee1454ab1 100644 --- a/typedapi/types/enums/highlighterencoder/highlighterencoder.go +++ b/typedapi/types/enums/highlighterencoder/highlighterencoder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package highlighterencoder package highlighterencoder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/highlighting.ts#L158-L161 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/highlighting.ts#L157-L160 type HighlighterEncoder struct { Name string } diff --git a/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go b/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go index 18f5ababc9..77319e22ab 100644 --- a/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go +++ b/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package highlighterfragmenter package highlighterfragmenter import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/highlighting.ts#L163-L166 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/highlighting.ts#L162-L165 type HighlighterFragmenter struct { Name string } diff --git a/typedapi/types/enums/highlighterorder/highlighterorder.go b/typedapi/types/enums/highlighterorder/highlighterorder.go index d1c9526082..325232f845 100644 --- a/typedapi/types/enums/highlighterorder/highlighterorder.go +++ b/typedapi/types/enums/highlighterorder/highlighterorder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package highlighterorder package highlighterorder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/highlighting.ts#L168-L170 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/highlighting.ts#L167-L169 type HighlighterOrder struct { Name string } diff --git a/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go b/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go index 0cfcce42a7..38d6bc19bd 100644 --- a/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go +++ b/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package highlightertagsschema package highlightertagsschema import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/highlighting.ts#L172-L174 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/highlighting.ts#L171-L173 type HighlighterTagsSchema struct { Name string } diff --git a/typedapi/types/enums/highlightertype/highlightertype.go b/typedapi/types/enums/highlightertype/highlightertype.go index c5dcf8ebc2..a877b57740 100644 --- a/typedapi/types/enums/highlightertype/highlightertype.go +++ b/typedapi/types/enums/highlightertype/highlightertype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package highlightertype package highlightertype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/highlighting.ts#L176-L191 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/highlighting.ts#L175-L190 type HighlighterType struct { Name string } diff --git a/typedapi/types/enums/holtwinterstype/holtwinterstype.go b/typedapi/types/enums/holtwinterstype/holtwinterstype.go index 41577f54b4..997d8653e8 100644 --- a/typedapi/types/enums/holtwinterstype/holtwinterstype.go +++ b/typedapi/types/enums/holtwinterstype/holtwinterstype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package holtwinterstype package holtwinterstype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L283-L286 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L309-L312 type HoltWintersType struct { Name string } diff --git a/typedapi/types/enums/httpinputmethod/httpinputmethod.go b/typedapi/types/enums/httpinputmethod/httpinputmethod.go index c4e973c858..90fe25822f 100644 --- a/typedapi/types/enums/httpinputmethod/httpinputmethod.go +++ b/typedapi/types/enums/httpinputmethod/httpinputmethod.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package httpinputmethod package httpinputmethod import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L59-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L59-L65 type HttpInputMethod struct { Name string } diff --git a/typedapi/types/enums/ibdistribution/ibdistribution.go b/typedapi/types/enums/ibdistribution/ibdistribution.go index 2cfed916e1..85b40adc7e 100644 --- a/typedapi/types/enums/ibdistribution/ibdistribution.go +++ b/typedapi/types/enums/ibdistribution/ibdistribution.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package ibdistribution package ibdistribution import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Similarity.ts#L42-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Similarity.ts#L42-L45 type IBDistribution struct { Name string } diff --git a/typedapi/types/enums/iblambda/iblambda.go b/typedapi/types/enums/iblambda/iblambda.go index fb84d165cc..240fd8f0c6 100644 --- a/typedapi/types/enums/iblambda/iblambda.go +++ b/typedapi/types/enums/iblambda/iblambda.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package iblambda package iblambda import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Similarity.ts#L47-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Similarity.ts#L47-L50 type IBLambda struct { Name string } diff --git a/typedapi/types/enums/icucollationalternate/icucollationalternate.go b/typedapi/types/enums/icucollationalternate/icucollationalternate.go index 642d31c2c3..ea643e207b 100644 --- a/typedapi/types/enums/icucollationalternate/icucollationalternate.go +++ b/typedapi/types/enums/icucollationalternate/icucollationalternate.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package icucollationalternate package icucollationalternate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L89-L92 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L89-L92 type IcuCollationAlternate struct { Name string } diff --git a/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go b/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go index 95e222e295..d173fcd2ab 100644 --- a/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go +++ b/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package icucollationcasefirst package icucollationcasefirst import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L94-L97 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L94-L97 type IcuCollationCaseFirst struct { Name string } diff --git a/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go b/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go index cc1bfaded9..df087aaee0 100644 --- a/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go +++ b/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package icucollationdecomposition package icucollationdecomposition import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L99-L102 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L99-L102 type IcuCollationDecomposition struct { Name string } diff --git a/typedapi/types/enums/icucollationstrength/icucollationstrength.go b/typedapi/types/enums/icucollationstrength/icucollationstrength.go index 0cfca23c49..fad6fc47e5 100644 --- a/typedapi/types/enums/icucollationstrength/icucollationstrength.go +++ b/typedapi/types/enums/icucollationstrength/icucollationstrength.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package icucollationstrength package icucollationstrength import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L104-L110 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L104-L110 type IcuCollationStrength struct { Name string } diff --git a/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go b/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go index b977fb28f8..00033b5b9e 100644 --- a/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go +++ b/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package icunormalizationmode package icunormalizationmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L78-L81 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L78-L81 type IcuNormalizationMode struct { Name string } diff --git a/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go b/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go index a74b1fa8b4..0ee81d80f6 100644 --- a/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go +++ b/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package icunormalizationtype package icunormalizationtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L83-L87 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L83-L87 type IcuNormalizationType struct { Name string } diff --git a/typedapi/types/enums/icutransformdirection/icutransformdirection.go b/typedapi/types/enums/icutransformdirection/icutransformdirection.go index 0a5dea67ee..05796e2353 100644 --- a/typedapi/types/enums/icutransformdirection/icutransformdirection.go +++ b/typedapi/types/enums/icutransformdirection/icutransformdirection.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package icutransformdirection package icutransformdirection import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L73-L76 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L73-L76 type IcuTransformDirection struct { Name string } diff --git a/typedapi/types/enums/impactarea/impactarea.go b/typedapi/types/enums/impactarea/impactarea.go index 55b9d86f2f..ecc1b51cb2 100644 --- a/typedapi/types/enums/impactarea/impactarea.go +++ b/typedapi/types/enums/impactarea/impactarea.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package impactarea package impactarea import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L72-L77 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L73-L78 type ImpactArea struct { Name string } diff --git a/typedapi/types/enums/include/include.go b/typedapi/types/enums/include/include.go index 883443d4a4..02a1307aca 100644 --- a/typedapi/types/enums/include/include.go +++ b/typedapi/types/enums/include/include.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package include package include import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Include.ts#L20-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Include.ts#L20-L47 type Include struct { Name string } diff --git a/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go b/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go index 7852b2cfdd..fe19fbd02e 100644 --- a/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go +++ b/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package indexcheckonstartup package indexcheckonstartup import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L262-L269 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L270-L277 type IndexCheckOnStartup struct { Name string } diff --git a/typedapi/types/enums/indexingjobstate/indexingjobstate.go b/typedapi/types/enums/indexingjobstate/indexingjobstate.go index c458d22e45..f8cdae7f7f 100644 --- a/typedapi/types/enums/indexingjobstate/indexingjobstate.go +++ b/typedapi/types/enums/indexingjobstate/indexingjobstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package indexingjobstate package indexingjobstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_jobs/types.ts#L66-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_jobs/types.ts#L77-L83 type IndexingJobState struct { Name string } diff --git a/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go b/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go index 6fa65e77f1..cb50fa4162 100644 --- a/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go +++ b/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package indexmetadatastate package indexmetadatastate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L225-L232 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L225-L232 type IndexMetadataState struct { Name string } diff --git a/typedapi/types/enums/indexoptions/indexoptions.go b/typedapi/types/enums/indexoptions/indexoptions.go index c91bd63b07..65fc1e6778 100644 --- a/typedapi/types/enums/indexoptions/indexoptions.go +++ b/typedapi/types/enums/indexoptions/indexoptions.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package indexoptions package indexoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L257-L262 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L274-L279 type IndexOptions struct { Name string } diff --git a/typedapi/types/enums/indexprivilege/indexprivilege.go b/typedapi/types/enums/indexprivilege/indexprivilege.go index 57fb358960..81f95ec0da 100644 --- a/typedapi/types/enums/indexprivilege/indexprivilege.go +++ b/typedapi/types/enums/indexprivilege/indexprivilege.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package indexprivilege package indexprivilege import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/Privileges.ts#L292-L334 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L386-L428 type IndexPrivilege struct { Name string } diff --git a/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go b/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go index d09c9f58fe..0df7dcdd28 100644 --- a/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go +++ b/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package indexroutingallocationoptions package indexroutingallocationoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexRouting.ts#L38-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexRouting.ts#L38-L43 type IndexRoutingAllocationOptions struct { Name string } diff --git a/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go b/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go index ce0ffb61b5..0a0e8dd4ec 100644 --- a/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go +++ b/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package indexroutingrebalanceoptions package indexroutingrebalanceoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexRouting.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexRouting.ts#L45-L50 type IndexRoutingRebalanceOptions struct { Name string } diff --git a/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go b/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go index 3d5de14b01..f24928ba39 100644 --- a/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go +++ b/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package indicatorhealthstatus package indicatorhealthstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L25-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L25-L30 type IndicatorHealthStatus struct { Name string } diff --git a/typedapi/types/enums/indicesblockoptions/indicesblockoptions.go b/typedapi/types/enums/indicesblockoptions/indicesblockoptions.go index ee331ecccf..b29d543d27 100644 --- a/typedapi/types/enums/indicesblockoptions/indicesblockoptions.go +++ b/typedapi/types/enums/indicesblockoptions/indicesblockoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package indicesblockoptions package indicesblockoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/add_block/IndicesAddBlockRequest.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/add_block/IndicesAddBlockRequest.ts#L91-L100 type IndicesBlockOptions struct { Name string } diff --git a/typedapi/types/enums/inputtype/inputtype.go b/typedapi/types/enums/inputtype/inputtype.go index 410c75c53f..6030d3080f 100644 --- a/typedapi/types/enums/inputtype/inputtype.go +++ b/typedapi/types/enums/inputtype/inputtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package inputtype package inputtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L100-L104 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L100-L104 type InputType struct { Name string } diff --git a/typedapi/types/enums/jobblockedreason/jobblockedreason.go b/typedapi/types/enums/jobblockedreason/jobblockedreason.go index 4b24421a61..402d98a20e 100644 --- a/typedapi/types/enums/jobblockedreason/jobblockedreason.go +++ b/typedapi/types/enums/jobblockedreason/jobblockedreason.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package jobblockedreason package jobblockedreason import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Job.ts#L397-L401 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Job.ts#L397-L401 type JobBlockedReason struct { Name string } diff --git a/typedapi/types/enums/jobstate/jobstate.go b/typedapi/types/enums/jobstate/jobstate.go index 3611896c1d..9e731eb5c4 100644 --- a/typedapi/types/enums/jobstate/jobstate.go +++ b/typedapi/types/enums/jobstate/jobstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package jobstate package jobstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Job.ts#L36-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Job.ts#L36-L52 type JobState struct { Name string } diff --git a/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go b/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go index c897db940c..e8db625fc0 100644 --- a/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go +++ b/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package jsonprocessorconflictstrategy package jsonprocessorconflictstrategy import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L849-L854 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1151-L1156 type JsonProcessorConflictStrategy struct { Name string } diff --git a/typedapi/types/enums/keeptypesmode/keeptypesmode.go b/typedapi/types/enums/keeptypesmode/keeptypesmode.go index e06b49ba2b..4270817760 100644 --- a/typedapi/types/enums/keeptypesmode/keeptypesmode.go +++ b/typedapi/types/enums/keeptypesmode/keeptypesmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package keeptypesmode package keeptypesmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L215-L218 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L214-L217 type KeepTypesMode struct { Name string } diff --git a/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go b/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go index fd99a9046f..009791215a 100644 --- a/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go +++ b/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package kuromojitokenizationmode package kuromojitokenizationmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/kuromoji-plugin.ts#L52-L56 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/kuromoji-plugin.ts#L52-L56 type KuromojiTokenizationMode struct { Name string } diff --git a/typedapi/types/enums/language/language.go b/typedapi/types/enums/language/language.go deleted file mode 100644 index b8ea254b47..0000000000 --- a/typedapi/types/enums/language/language.go +++ /dev/null @@ -1,185 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -// Package language -package language - -import "strings" - -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/languages.ts#L20-L55 -type Language struct { - Name string -} - -var ( - Arabic = Language{"Arabic"} - - Armenian = Language{"Armenian"} - - Basque = Language{"Basque"} - - Brazilian = Language{"Brazilian"} - - Bulgarian = Language{"Bulgarian"} - - Catalan = Language{"Catalan"} - - Chinese = Language{"Chinese"} - - Cjk = Language{"Cjk"} - - Czech = Language{"Czech"} - - Danish = Language{"Danish"} - - Dutch = Language{"Dutch"} - - English = Language{"English"} - - Estonian = Language{"Estonian"} - - Finnish = Language{"Finnish"} - - French = Language{"French"} - - Galician = Language{"Galician"} - - German = Language{"German"} - - Greek = Language{"Greek"} - - Hindi = Language{"Hindi"} - - Hungarian = Language{"Hungarian"} - - Indonesian = Language{"Indonesian"} - - Irish = Language{"Irish"} - - Italian = Language{"Italian"} - - Latvian = Language{"Latvian"} - - Norwegian = Language{"Norwegian"} - - Persian = Language{"Persian"} - - Portuguese = Language{"Portuguese"} - - Romanian = Language{"Romanian"} - - Russian = Language{"Russian"} - - Sorani = Language{"Sorani"} - - Spanish = Language{"Spanish"} - - Swedish = Language{"Swedish"} - - Turkish = Language{"Turkish"} - - Thai = Language{"Thai"} -) - -func (l Language) MarshalText() (text []byte, err error) { - return []byte(l.String()), nil -} - -func (l *Language) UnmarshalText(text []byte) error { - switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - - case "Arabic": - *l = Arabic - case "Armenian": - *l = Armenian - case "Basque": - *l = Basque - case "Brazilian": - *l = Brazilian - case "Bulgarian": - *l = Bulgarian - case "Catalan": - *l = Catalan - case "Chinese": - *l = Chinese - case "Cjk": 
- *l = Cjk - case "Czech": - *l = Czech - case "Danish": - *l = Danish - case "Dutch": - *l = Dutch - case "English": - *l = English - case "Estonian": - *l = Estonian - case "Finnish": - *l = Finnish - case "French": - *l = French - case "Galician": - *l = Galician - case "German": - *l = German - case "Greek": - *l = Greek - case "Hindi": - *l = Hindi - case "Hungarian": - *l = Hungarian - case "Indonesian": - *l = Indonesian - case "Irish": - *l = Irish - case "Italian": - *l = Italian - case "Latvian": - *l = Latvian - case "Norwegian": - *l = Norwegian - case "Persian": - *l = Persian - case "Portuguese": - *l = Portuguese - case "Romanian": - *l = Romanian - case "Russian": - *l = Russian - case "Sorani": - *l = Sorani - case "Spanish": - *l = Spanish - case "Swedish": - *l = Swedish - case "Turkish": - *l = Turkish - case "Thai": - *l = Thai - default: - *l = Language{string(text)} - } - - return nil -} - -func (l Language) String() string { - return l.Name -} diff --git a/typedapi/types/enums/level/level.go b/typedapi/types/enums/level/level.go index d299dda75b..035b20e5f6 100644 --- a/typedapi/types/enums/level/level.go +++ b/typedapi/types/enums/level/level.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package level package level import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L249-L253 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L253-L257 type Level struct { Name string } diff --git a/typedapi/types/enums/licensestatus/licensestatus.go b/typedapi/types/enums/licensestatus/licensestatus.go index c5bf3a7d36..ba84a33527 100644 --- a/typedapi/types/enums/licensestatus/licensestatus.go +++ b/typedapi/types/enums/licensestatus/licensestatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package licensestatus package licensestatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/license/_types/License.ts#L35-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/license/_types/License.ts#L35-L40 type LicenseStatus struct { Name string } diff --git a/typedapi/types/enums/licensetype/licensetype.go b/typedapi/types/enums/licensetype/licensetype.go index 1669a3daca..c658571826 100644 --- a/typedapi/types/enums/licensetype/licensetype.go +++ b/typedapi/types/enums/licensetype/licensetype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package licensetype package licensetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/license/_types/License.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/license/_types/License.ts#L23-L33 type LicenseType struct { Name string } diff --git a/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go b/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go index 33ecb43d31..cdf1ae91d9 100644 --- a/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go +++ b/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package lifecycleoperationmode package lifecycleoperationmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Lifecycle.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Lifecycle.ts#L20-L24 type LifecycleOperationMode struct { Name string } @@ -43,11 +43,11 @@ func (l LifecycleOperationMode) MarshalText() (text []byte, err error) { func (l *LifecycleOperationMode) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "RUNNING": + case "running": *l = RUNNING - case "STOPPING": + case "stopping": *l = STOPPING - case "STOPPED": + case "stopped": *l = STOPPED default: *l = LifecycleOperationMode{string(text)} diff --git a/typedapi/types/enums/managedby/managedby.go b/typedapi/types/enums/managedby/managedby.go index 0226aa7ced..1b0018e13b 100644 --- a/typedapi/types/enums/managedby/managedby.go +++ b/typedapi/types/enums/managedby/managedby.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package managedby package managedby import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/DataStream.ts#L32-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/DataStream.ts#L32-L37 type ManagedBy struct { Name string } @@ -43,11 +43,11 @@ func (m ManagedBy) MarshalText() (text []byte, err error) { func (m *ManagedBy) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "Index Lifecycle Management": + case "index lifecycle management": *m = Ilm - case "Data stream lifecycle": + case "data stream lifecycle": *m = Datastream - case "Unmanaged": + case "unmanaged": *m = Unmanaged default: *m = ManagedBy{string(text)} diff --git a/typedapi/types/enums/matchtype/matchtype.go b/typedapi/types/enums/matchtype/matchtype.go index 367946f51c..6740e70a21 100644 --- a/typedapi/types/enums/matchtype/matchtype.go +++ b/typedapi/types/enums/matchtype/matchtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package matchtype package matchtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/dynamic-template.ts#L44-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/dynamic-template.ts#L45-L48 type MatchType struct { Name string } diff --git a/typedapi/types/enums/memorystatus/memorystatus.go b/typedapi/types/enums/memorystatus/memorystatus.go index 97314b8aa7..a7fb19a6b2 100644 --- a/typedapi/types/enums/memorystatus/memorystatus.go +++ b/typedapi/types/enums/memorystatus/memorystatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package memorystatus package memorystatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Model.ts#L88-L92 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Model.ts#L89-L93 type MemoryStatus struct { Name string } diff --git a/typedapi/types/enums/metric/metric.go b/typedapi/types/enums/metric/metric.go index b7f92bf0bb..56f0acbe6a 100644 --- a/typedapi/types/enums/metric/metric.go +++ b/typedapi/types/enums/metric/metric.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package metric package metric import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/_types/Metric.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/_types/Metric.ts#L22-L28 type Metric struct { Name string } diff --git a/typedapi/types/enums/migrationstatus/migrationstatus.go b/typedapi/types/enums/migrationstatus/migrationstatus.go index 0e63ac6ef2..46eee2b6d4 100644 --- a/typedapi/types/enums/migrationstatus/migrationstatus.go +++ b/typedapi/types/enums/migrationstatus/migrationstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package migrationstatus package migrationstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L30-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L30-L35 type MigrationStatus struct { Name string } @@ -45,13 +45,13 @@ func (m MigrationStatus) MarshalText() (text []byte, err error) { func (m *MigrationStatus) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "NO_MIGRATION_NEEDED": + case "no_migration_needed": *m = 
NOMIGRATIONNEEDED - case "MIGRATION_NEEDED": + case "migration_needed": *m = MIGRATIONNEEDED - case "IN_PROGRESS": + case "in_progress": *m = INPROGRESS - case "ERROR": + case "error": *m = ERROR default: *m = MigrationStatus{string(text)} diff --git a/typedapi/types/enums/minimuminterval/minimuminterval.go b/typedapi/types/enums/minimuminterval/minimuminterval.go index 962326e4d4..bd67683c3e 100644 --- a/typedapi/types/enums/minimuminterval/minimuminterval.go +++ b/typedapi/types/enums/minimuminterval/minimuminterval.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package minimuminterval package minimuminterval import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L104-L111 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L112-L119 type MinimumInterval struct { Name string } diff --git a/typedapi/types/enums/missingorder/missingorder.go b/typedapi/types/enums/missingorder/missingorder.go index 59ce6e1b83..430ecc5f24 100644 --- a/typedapi/types/enums/missingorder/missingorder.go +++ b/typedapi/types/enums/missingorder/missingorder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package missingorder package missingorder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/AggregationContainer.ts#L517-L521 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/AggregationContainer.ts#L536-L540 type MissingOrder struct { Name string } diff --git a/typedapi/types/enums/modeenum/modeenum.go b/typedapi/types/enums/modeenum/modeenum.go new file mode 100644 index 0000000000..484604eeaf --- /dev/null +++ b/typedapi/types/enums/modeenum/modeenum.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package modeenum +package modeenum + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/migrate_reindex/MigrateReindexRequest.ts#L54-L56 +type ModeEnum struct { + Name string +} + +var ( + Upgrade = ModeEnum{"upgrade"} +) + +func (m ModeEnum) MarshalText() (text []byte, err error) { + return []byte(m.String()), nil +} + +func (m *ModeEnum) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "upgrade": + *m = Upgrade + default: + *m = ModeEnum{string(text)} + } + + return nil +} + +func (m ModeEnum) String() string { + return m.Name +} diff --git a/typedapi/types/enums/month/month.go b/typedapi/types/enums/month/month.go index db5bc52c35..bd8fbd0177 100644 --- a/typedapi/types/enums/month/month.go +++ b/typedapi/types/enums/month/month.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package month package month import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Schedule.ts#L65-L78 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Schedule.ts#L65-L78 type Month struct { Name string } diff --git a/typedapi/types/enums/multivaluemode/multivaluemode.go b/typedapi/types/enums/multivaluemode/multivaluemode.go index 94ec9bc4f2..d019341e3b 100644 --- a/typedapi/types/enums/multivaluemode/multivaluemode.go +++ b/typedapi/types/enums/multivaluemode/multivaluemode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package multivaluemode package multivaluemode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L355-L372 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L368-L385 type MultiValueMode struct { Name string } diff --git a/typedapi/types/enums/noderole/noderole.go b/typedapi/types/enums/noderole/noderole.go index 08074f6cc2..f99b50c6ac 100644 --- a/typedapi/types/enums/noderole/noderole.go +++ b/typedapi/types/enums/noderole/noderole.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package noderole package noderole import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Node.ts#L77-L95 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Node.ts#L71-L89 type NodeRole struct { Name string } diff --git a/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go b/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go index da8833763d..6df613daeb 100644 --- a/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go +++ b/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package noridecompoundmode package noridecompoundmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L75-L79 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/nori-plugin.ts#L22-L26 type NoriDecompoundMode struct { Name string } diff --git a/typedapi/types/enums/normalization/normalization.go b/typedapi/types/enums/normalization/normalization.go index 5dadc90032..1d0490d731 100644 --- a/typedapi/types/enums/normalization/normalization.go +++ b/typedapi/types/enums/normalization/normalization.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package normalization package normalization import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Similarity.ts#L52-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Similarity.ts#L52-L58 type Normalization struct { Name string } diff --git a/typedapi/types/enums/normalizemethod/normalizemethod.go b/typedapi/types/enums/normalizemethod/normalizemethod.go index 3f49def2bf..1bd99dddfd 100644 --- a/typedapi/types/enums/normalizemethod/normalizemethod.go +++ b/typedapi/types/enums/normalizemethod/normalizemethod.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package normalizemethod package normalizemethod import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L326-L352 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L361-L387 type NormalizeMethod struct { Name string } diff --git a/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go b/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go index daf74d9345..fb8d2361dd 100644 --- a/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go +++ b/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package numericfielddataformat package numericfielddataformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/NumericFielddataFormat.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/NumericFielddataFormat.ts#L20-L23 type NumericFielddataFormat struct { Name string } diff --git a/typedapi/types/enums/onscripterror/onscripterror.go b/typedapi/types/enums/onscripterror/onscripterror.go index a782188e2d..4449792d49 100644 --- a/typedapi/types/enums/onscripterror/onscripterror.go +++ b/typedapi/types/enums/onscripterror/onscripterror.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package onscripterror package onscripterror import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L137-L140 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L141-L144 type OnScriptError struct { Name string } diff --git a/typedapi/types/enums/openaitasktype/openaitasktype.go b/typedapi/types/enums/openaitasktype/openaitasktype.go new file mode 100644 index 0000000000..a40a01f3de --- /dev/null +++ b/typedapi/types/enums/openaitasktype/openaitasktype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package openaitasktype +package openaitasktype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/put_openai/PutOpenAiRequest.ts#L84-L88 +type OpenAITaskType struct { + Name string +} + +var ( + Chatcompletion = OpenAITaskType{"chat_completion"} + + Completion = OpenAITaskType{"completion"} + + Textembedding = OpenAITaskType{"text_embedding"} +) + +func (o OpenAITaskType) MarshalText() (text []byte, err error) { + return []byte(o.String()), nil +} + +func (o *OpenAITaskType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "chat_completion": + *o = Chatcompletion + case "completion": + *o = Completion + case "text_embedding": + *o = Textembedding + default: + *o = OpenAITaskType{string(text)} + } + + return nil +} + +func (o OpenAITaskType) String() string { + return o.Name +} diff --git a/typedapi/types/enums/operationtype/operationtype.go 
b/typedapi/types/enums/operationtype/operationtype.go index dd5eea4fcd..9cc43516e5 100644 --- a/typedapi/types/enums/operationtype/operationtype.go +++ b/typedapi/types/enums/operationtype/operationtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package operationtype package operationtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/bulk/types.ts#L83-L88 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/bulk/types.ts#L93-L98 type OperationType struct { Name string } diff --git a/typedapi/types/enums/operator/operator.go b/typedapi/types/enums/operator/operator.go index d6c5c24505..f6a4e3efc2 100644 --- a/typedapi/types/enums/operator/operator.go +++ b/typedapi/types/enums/operator/operator.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package operator package operator import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/Operator.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/Operator.ts#L22-L27 type Operator struct { Name string } diff --git a/typedapi/types/enums/optype/optype.go b/typedapi/types/enums/optype/optype.go index 1a4e883e6b..635103c383 100644 --- a/typedapi/types/enums/optype/optype.go +++ b/typedapi/types/enums/optype/optype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package optype package optype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L255-L264 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L259-L268 type OpType struct { Name string } diff --git a/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go b/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go index a8242b2059..b4582a8497 100644 --- a/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go +++ b/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package pagerdutycontexttype package pagerdutycontexttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L67-L70 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L67-L70 type PagerDutyContextType struct { Name string } diff --git a/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go b/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go index 6880bae0d6..2b98f14cd8 100644 --- a/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go +++ b/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package pagerdutyeventtype package pagerdutyeventtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L72-L76 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L72-L76 type PagerDutyEventType struct { Name string } diff --git a/typedapi/types/enums/painlesscontext/painlesscontext.go b/typedapi/types/enums/painlesscontext/painlesscontext.go new file mode 100644 index 0000000000..e193584796 --- /dev/null +++ b/typedapi/types/enums/painlesscontext/painlesscontext.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package painlesscontext +package painlesscontext + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/scripts_painless_execute/types.ts#L57-L80 +type PainlessContext struct { + Name string +} + +var ( + Painlesstest = PainlessContext{"painless_test"} + + Filter = PainlessContext{"filter"} + + Score = PainlessContext{"score"} + + Booleanfield = PainlessContext{"boolean_field"} + + Datefield = PainlessContext{"date_field"} + + Doublefield = PainlessContext{"double_field"} + + Geopointfield = PainlessContext{"geo_point_field"} + + Ipfield = PainlessContext{"ip_field"} + + Keywordfield = PainlessContext{"keyword_field"} + + Longfield = PainlessContext{"long_field"} + + Compositefield = PainlessContext{"composite_field"} +) + +func (p PainlessContext) MarshalText() (text []byte, err error) { + return []byte(p.String()), nil +} + +func (p *PainlessContext) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "painless_test": + *p = Painlesstest + case "filter": + *p = Filter + case "score": + *p = Score + case "boolean_field": + *p = Booleanfield + case "date_field": + *p = Datefield + case "double_field": + *p = Doublefield + case "geo_point_field": + *p = Geopointfield + case "ip_field": + *p = Ipfield + case "keyword_field": + *p = Keywordfield + case "long_field": + *p = Longfield + case "composite_field": + *p = Compositefield + default: + *p = PainlessContext{string(text)} + } + + return nil +} + +func (p PainlessContext) String() string { + return p.Name +} diff --git a/typedapi/types/enums/phoneticencoder/phoneticencoder.go b/typedapi/types/enums/phoneticencoder/phoneticencoder.go index a5754e6cb4..8ef64656d0 100644 --- a/typedapi/types/enums/phoneticencoder/phoneticencoder.go +++ 
b/typedapi/types/enums/phoneticencoder/phoneticencoder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package phoneticencoder package phoneticencoder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/phonetic-plugin.ts#L23-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/phonetic-plugin.ts#L23-L36 type PhoneticEncoder struct { Name string } diff --git a/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go b/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go index 086bebaaac..6dacaf4b9a 100644 --- a/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go +++ b/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package phoneticlanguage package phoneticlanguage import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/phonetic-plugin.ts#L38-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/phonetic-plugin.ts#L38-L51 type PhoneticLanguage struct { Name string } diff --git a/typedapi/types/enums/phoneticnametype/phoneticnametype.go b/typedapi/types/enums/phoneticnametype/phoneticnametype.go index d2585b391b..1587ce91a2 100644 --- a/typedapi/types/enums/phoneticnametype/phoneticnametype.go +++ b/typedapi/types/enums/phoneticnametype/phoneticnametype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package phoneticnametype package phoneticnametype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/phonetic-plugin.ts#L53-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/phonetic-plugin.ts#L53-L57 type PhoneticNameType struct { Name string } diff --git a/typedapi/types/enums/phoneticruletype/phoneticruletype.go b/typedapi/types/enums/phoneticruletype/phoneticruletype.go index f5f2211b02..e660918da8 100644 --- a/typedapi/types/enums/phoneticruletype/phoneticruletype.go +++ b/typedapi/types/enums/phoneticruletype/phoneticruletype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package phoneticruletype package phoneticruletype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/phonetic-plugin.ts#L59-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/phonetic-plugin.ts#L59-L62 type PhoneticRuleType struct { Name string } diff --git a/typedapi/types/enums/policytype/policytype.go b/typedapi/types/enums/policytype/policytype.go index a91d5d51b4..e41dfcb7cd 100644 --- a/typedapi/types/enums/policytype/policytype.go +++ b/typedapi/types/enums/policytype/policytype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package policytype package policytype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/_types/Policy.ts#L28-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/_types/Policy.ts#L28-L32 type PolicyType struct { Name string } diff --git a/typedapi/types/enums/quantifier/quantifier.go b/typedapi/types/enums/quantifier/quantifier.go index 703ce10ff5..13c8c58e61 100644 --- a/typedapi/types/enums/quantifier/quantifier.go +++ b/typedapi/types/enums/quantifier/quantifier.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package quantifier package quantifier import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Conditions.ts#L74-L77 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Conditions.ts#L74-L77 type Quantifier struct { Name string } diff --git a/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go b/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go index 0c3f7e2910..a5084ec09a 100644 --- a/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go +++ b/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package queryrulecriteriatype package queryrulecriteriatype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/_types/QueryRuleset.ts#L54-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/_types/QueryRuleset.ts#L95-L108 type QueryRuleCriteriaType struct { Name string } diff --git a/typedapi/types/enums/queryruletype/queryruletype.go b/typedapi/types/enums/queryruletype/queryruletype.go index 2764992796..88623c3208 100644 --- a/typedapi/types/enums/queryruletype/queryruletype.go +++ b/typedapi/types/enums/queryruletype/queryruletype.go @@ -16,20 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package queryruletype package queryruletype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/_types/QueryRuleset.ts#L44-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/_types/QueryRuleset.ts#L60-L63 type QueryRuleType struct { Name string } var ( Pinned = QueryRuleType{"pinned"} + + Exclude = QueryRuleType{"exclude"} ) func (q QueryRuleType) MarshalText() (text []byte, err error) { @@ -41,6 +43,8 @@ func (q *QueryRuleType) UnmarshalText(text []byte) error { case "pinned": *q = Pinned + case "exclude": + *q = Exclude default: *q = QueryRuleType{string(text)} } diff --git a/typedapi/types/enums/rangerelation/rangerelation.go b/typedapi/types/enums/rangerelation/rangerelation.go index 7af94fa1be..1a9f4feabe 100644 --- a/typedapi/types/enums/rangerelation/rangerelation.go +++ b/typedapi/types/enums/rangerelation/rangerelation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package rangerelation package rangerelation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L172-L185 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L188-L201 type RangeRelation struct { Name string } diff --git a/typedapi/types/enums/ratemode/ratemode.go b/typedapi/types/enums/ratemode/ratemode.go index a89da80065..2342556bfd 100644 --- a/typedapi/types/enums/ratemode/ratemode.go +++ b/typedapi/types/enums/ratemode/ratemode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package ratemode package ratemode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L243-L252 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L252-L261 type RateMode struct { Name string } diff --git a/typedapi/types/enums/refresh/refresh.go b/typedapi/types/enums/refresh/refresh.go index 80fde3805c..320f0f461a 100644 --- a/typedapi/types/enums/refresh/refresh.go +++ b/typedapi/types/enums/refresh/refresh.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package refresh package refresh import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L266-L273 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L270-L277 type Refresh struct { Name string } diff --git a/typedapi/types/enums/remoteclusterprivilege/remoteclusterprivilege.go b/typedapi/types/enums/remoteclusterprivilege/remoteclusterprivilege.go new file mode 100644 index 0000000000..d5916cb6f8 --- /dev/null +++ b/typedapi/types/enums/remoteclusterprivilege/remoteclusterprivilege.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package remoteclusterprivilege +package remoteclusterprivilege + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L201-L213 +type RemoteClusterPrivilege struct { + Name string +} + +var ( + Monitorenrich = RemoteClusterPrivilege{"monitor_enrich"} + + Monitorstats = RemoteClusterPrivilege{"monitor_stats"} +) + +func (r RemoteClusterPrivilege) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *RemoteClusterPrivilege) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "monitor_enrich": + *r = Monitorenrich + case "monitor_stats": + *r = Monitorstats + default: + *r = RemoteClusterPrivilege{string(text)} + } + + return nil +} + +func (r RemoteClusterPrivilege) String() string { + return r.Name +} diff --git a/typedapi/types/enums/responsecontenttype/responsecontenttype.go b/typedapi/types/enums/responsecontenttype/responsecontenttype.go index 68132f80c9..fc373d3ead 100644 --- a/typedapi/types/enums/responsecontenttype/responsecontenttype.go +++ b/typedapi/types/enums/responsecontenttype/responsecontenttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package responsecontenttype package responsecontenttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L106-L110 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L106-L110 type ResponseContentType struct { Name string } diff --git a/typedapi/types/enums/restrictionworkflow/restrictionworkflow.go b/typedapi/types/enums/restrictionworkflow/restrictionworkflow.go new file mode 100644 index 0000000000..5448deec42 --- /dev/null +++ b/typedapi/types/enums/restrictionworkflow/restrictionworkflow.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package restrictionworkflow +package restrictionworkflow + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/RoleDescriptor.ts#L143-L146 +type RestrictionWorkflow struct { + Name string +} + +var ( + Searchapplicationquery = RestrictionWorkflow{"search_application_query"} +) + +func (r RestrictionWorkflow) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *RestrictionWorkflow) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "search_application_query": + *r = Searchapplicationquery + default: + *r = RestrictionWorkflow{string(text)} + } + + return nil +} + +func (r RestrictionWorkflow) String() string { + return r.Name +} diff --git a/typedapi/types/enums/result/result.go b/typedapi/types/enums/result/result.go index 361cf9f2c9..2cadb123a8 100644 --- a/typedapi/types/enums/result/result.go +++ b/typedapi/types/enums/result/result.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package result package result import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Result.ts#L20-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Result.ts#L20-L26 type Result struct { Name string } diff --git a/typedapi/types/enums/resultposition/resultposition.go b/typedapi/types/enums/resultposition/resultposition.go index c21a8447f9..755b19e097 100644 --- a/typedapi/types/enums/resultposition/resultposition.go +++ b/typedapi/types/enums/resultposition/resultposition.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package resultposition package resultposition import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/eql/search/types.ts#L20-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/eql/search/types.ts#L20-L32 type ResultPosition struct { Name string } diff --git a/typedapi/types/enums/routingstate/routingstate.go b/typedapi/types/enums/routingstate/routingstate.go index 1b124dddbd..3cdb414985 100644 --- a/typedapi/types/enums/routingstate/routingstate.go +++ b/typedapi/types/enums/routingstate/routingstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package routingstate package routingstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L351-L372 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L407-L428 type RoutingState struct { Name string } diff --git a/typedapi/types/enums/ruleaction/ruleaction.go b/typedapi/types/enums/ruleaction/ruleaction.go index 208beee5c3..d83755646a 100644 --- a/typedapi/types/enums/ruleaction/ruleaction.go +++ b/typedapi/types/enums/ruleaction/ruleaction.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package ruleaction package ruleaction import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Rule.ts#L41-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Rule.ts#L41-L50 type RuleAction struct { Name string } diff --git a/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go b/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go index 1af195aa53..3a69f0f0b8 100644 --- a/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go +++ b/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package runtimefieldtype package runtimefieldtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/RuntimeFields.ts#L56-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/RuntimeFields.ts#L62-L72 type RuntimeFieldType struct { Name string } diff --git a/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go b/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go index 79d299b326..7f945021c0 100644 --- a/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go +++ b/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package sampleraggregationexecutionhint package sampleraggregationexecutionhint import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L345-L358 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L359-L372 type SamplerAggregationExecutionHint struct { Name string } diff --git a/typedapi/types/enums/scoremode/scoremode.go b/typedapi/types/enums/scoremode/scoremode.go index 7424641e2c..73c759a148 100644 --- a/typedapi/types/enums/scoremode/scoremode.go +++ b/typedapi/types/enums/scoremode/scoremode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package scoremode package scoremode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/rescoring.ts#L64-L86 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/rescoring.ts#L64-L86 type ScoreMode struct { Name string } diff --git a/typedapi/types/enums/scriptlanguage/scriptlanguage.go b/typedapi/types/enums/scriptlanguage/scriptlanguage.go index d38acdd3ff..5b8f15a3e2 100644 --- a/typedapi/types/enums/scriptlanguage/scriptlanguage.go +++ b/typedapi/types/enums/scriptlanguage/scriptlanguage.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package scriptlanguage package scriptlanguage import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Scripting.ts#L24-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Scripting.ts#L24-L45 type ScriptLanguage struct { Name string } diff --git a/typedapi/types/enums/scriptsorttype/scriptsorttype.go b/typedapi/types/enums/scriptsorttype/scriptsorttype.go index 3aca1a929e..bc977ca003 100644 --- a/typedapi/types/enums/scriptsorttype/scriptsorttype.go +++ b/typedapi/types/enums/scriptsorttype/scriptsorttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package scriptsorttype package scriptsorttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/sort.ts#L81-L85 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/sort.ts#L80-L84 type ScriptSortType struct { Name string } diff --git a/typedapi/types/enums/searchtype/searchtype.go b/typedapi/types/enums/searchtype/searchtype.go index cdddc23524..93b4cd9b17 100644 --- a/typedapi/types/enums/searchtype/searchtype.go +++ b/typedapi/types/enums/searchtype/searchtype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package searchtype package searchtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L275-L280 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L279-L284 type SearchType struct { Name string } diff --git a/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go b/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go index d004a85c46..923d041c25 100644 --- a/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go +++ b/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package segmentsortmissing package segmentsortmissing import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSegmentSort.ts#L43-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSegmentSort.ts#L43-L46 type SegmentSortMissing struct { Name string } diff --git a/typedapi/types/enums/segmentsortmode/segmentsortmode.go b/typedapi/types/enums/segmentsortmode/segmentsortmode.go index 044b8cd41f..ceffa8ba38 100644 --- a/typedapi/types/enums/segmentsortmode/segmentsortmode.go +++ b/typedapi/types/enums/segmentsortmode/segmentsortmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package segmentsortmode package segmentsortmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSegmentSort.ts#L36-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSegmentSort.ts#L36-L41 type SegmentSortMode struct { Name string } diff --git a/typedapi/types/enums/segmentsortorder/segmentsortorder.go b/typedapi/types/enums/segmentsortorder/segmentsortorder.go index ff68f673c4..4f10e01ab6 100644 --- a/typedapi/types/enums/segmentsortorder/segmentsortorder.go +++ b/typedapi/types/enums/segmentsortorder/segmentsortorder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package segmentsortorder package segmentsortorder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSegmentSort.ts#L29-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSegmentSort.ts#L29-L34 type SegmentSortOrder struct { Name string } diff --git a/typedapi/types/enums/servicetype/servicetype.go b/typedapi/types/enums/servicetype/servicetype.go new file mode 100644 index 0000000000..00b35e1f15 --- /dev/null +++ b/typedapi/types/enums/servicetype/servicetype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package servicetype +package servicetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/put_watsonx/PutWatsonxRequest.ts#L76-L78 +type ServiceType struct { + Name string +} + +var ( + Watsonxai = ServiceType{"watsonxai"} +) + +func (s ServiceType) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ServiceType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "watsonxai": + *s = Watsonxai + default: + *s = ServiceType{string(text)} + } + + return nil +} + +func (s ServiceType) String() string { + return s.Name +} diff --git a/typedapi/types/enums/shapetype/shapetype.go b/typedapi/types/enums/shapetype/shapetype.go index 4f8e37019b..32c518b0ae 100644 --- a/typedapi/types/enums/shapetype/shapetype.go +++ b/typedapi/types/enums/shapetype/shapetype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package shapetype package shapetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L1074-L1077 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1473-L1476 type ShapeType struct { Name string } diff --git a/typedapi/types/enums/shardroutingstate/shardroutingstate.go b/typedapi/types/enums/shardroutingstate/shardroutingstate.go index 1761d30a3d..855f315a12 100644 --- a/typedapi/types/enums/shardroutingstate/shardroutingstate.go +++ b/typedapi/types/enums/shardroutingstate/shardroutingstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package shardroutingstate package shardroutingstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L169-L174 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L169-L174 type ShardRoutingState struct { Name string } @@ -45,13 +45,13 @@ func (s ShardRoutingState) MarshalText() (text []byte, err error) { func (s *ShardRoutingState) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "UNASSIGNED": + case "unassigned": *s = UNASSIGNED - case "INITIALIZING": + case "initializing": *s = INITIALIZING - case "STARTED": + case "started": *s = STARTED - case "RELOCATING": + case "relocating": *s = RELOCATING default: *s = ShardRoutingState{string(text)} diff --git a/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go b/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go index 1689f90ccf..c87a18b3c3 100644 --- a/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go +++ b/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package shardsstatsstage package shardsstatsstage import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotShardsStatsStage.ts#L20-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotShardsStatsStage.ts#L20-L31 type ShardsStatsStage struct { Name string } @@ -47,15 +47,15 @@ func (s ShardsStatsStage) MarshalText() (text []byte, err error) { func (s *ShardsStatsStage) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "DONE": + case "done": *s = DONE - case "FAILURE": + case "failure": *s = FAILURE - case "FINALIZE": + case "finalize": *s = FINALIZE - case "INIT": + case "init": *s = INIT - case "STARTED": + case "started": *s = STARTED default: *s = ShardsStatsStage{string(text)} diff --git a/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go b/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go index 173959cb56..f3a9f70b54 100644 --- a/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go +++ b/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package shardstoreallocation package shardstoreallocation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/shard_stores/types.ts#L48-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/shard_stores/types.ts#L47-L51 type ShardStoreAllocation struct { Name string } diff --git a/typedapi/types/enums/shardstorestatus/shardstorestatus.go b/typedapi/types/enums/shardstorestatus/shardstorestatus.go index 40c15d504e..6e6df1e237 100644 --- a/typedapi/types/enums/shardstorestatus/shardstorestatus.go +++ b/typedapi/types/enums/shardstorestatus/shardstorestatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package shardstorestatus package shardstorestatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/shard_stores/types.ts#L63-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/shard_stores/types.ts#L62-L71 type ShardStoreStatus struct { Name string } diff --git a/typedapi/types/enums/shutdownstatus/shutdownstatus.go b/typedapi/types/enums/shutdownstatus/shutdownstatus.go index aa54e6bd9b..7712dbda0c 100644 --- a/typedapi/types/enums/shutdownstatus/shutdownstatus.go +++ b/typedapi/types/enums/shutdownstatus/shutdownstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package shutdownstatus package shutdownstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L45-L50 type ShutdownStatus struct { Name string } diff --git a/typedapi/types/enums/shutdowntype/shutdowntype.go b/typedapi/types/enums/shutdowntype/shutdowntype.go index 85ed14587d..87a4eeebcf 100644 --- a/typedapi/types/enums/shutdowntype/shutdowntype.go +++ b/typedapi/types/enums/shutdowntype/shutdowntype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package shutdowntype package shutdowntype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L40-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L40-L43 type ShutdownType struct { Name string } diff --git a/typedapi/types/enums/simplequerystringflag/simplequerystringflag.go b/typedapi/types/enums/simplequerystringflag/simplequerystringflag.go index 372ae6174f..d938e6f54d 100644 --- a/typedapi/types/enums/simplequerystringflag/simplequerystringflag.go +++ b/typedapi/types/enums/simplequerystringflag/simplequerystringflag.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package simplequerystringflag package simplequerystringflag import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L708-L763 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L729-L784 type SimpleQueryStringFlag struct { Name string } @@ -63,31 +63,31 @@ func (s SimpleQueryStringFlag) MarshalText() (text []byte, err error) { func (s *SimpleQueryStringFlag) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "NONE": + case "none": *s = NONE - case "AND": + case "and": *s = AND - case "NOT": + case "not": *s = NOT - case "OR": + case "or": *s = OR - case "PREFIX": + case "prefix": *s = PREFIX - case "PHRASE": + case "phrase": *s = PHRASE - case "PRECEDENCE": + case "precedence": *s = PRECEDENCE - case "ESCAPE": + case "escape": *s = ESCAPE - case "WHITESPACE": + case "whitespace": *s = WHITESPACE - case "FUZZY": + case "fuzzy": *s = FUZZY - case "NEAR": + case "near": *s = NEAR - case "SLOP": + case "slop": *s = SLOP - case "ALL": + case "all": *s = ALL default: *s = SimpleQueryStringFlag{string(text)} diff --git a/typedapi/types/enums/slicescalculation/slicescalculation.go b/typedapi/types/enums/slicescalculation/slicescalculation.go index f4f885c152..e33a383afa 100644 --- a/typedapi/types/enums/slicescalculation/slicescalculation.go +++ b/typedapi/types/enums/slicescalculation/slicescalculation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package slicescalculation package slicescalculation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L371-L379 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L375-L383 type SlicesCalculation struct { Name string } diff --git a/typedapi/types/enums/snapshotsort/snapshotsort.go b/typedapi/types/enums/snapshotsort/snapshotsort.go index e37599e13f..ae0bd8ba67 100644 --- a/typedapi/types/enums/snapshotsort/snapshotsort.go +++ b/typedapi/types/enums/snapshotsort/snapshotsort.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package snapshotsort package snapshotsort import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotInfo.ts#L73-L93 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotInfo.ts#L73-L93 type SnapshotSort struct { Name string } diff --git a/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go b/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go index b04801f168..b6d718d7bf 100644 --- a/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go +++ b/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package snapshotupgradestate package snapshotupgradestate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Model.ts#L94-L99 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Model.ts#L95-L100 type SnapshotUpgradeState struct { Name string } diff --git a/typedapi/types/enums/snowballlanguage/snowballlanguage.go b/typedapi/types/enums/snowballlanguage/snowballlanguage.go index 9cbdbd9c42..e0612069b5 100644 --- a/typedapi/types/enums/snowballlanguage/snowballlanguage.go +++ b/typedapi/types/enums/snowballlanguage/snowballlanguage.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package snowballlanguage package snowballlanguage import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/languages.ts#L57-L80 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/languages.ts#L20-L43 type SnowballLanguage struct { Name string } @@ -81,49 +81,49 @@ func (s SnowballLanguage) MarshalText() (text []byte, err error) { func (s *SnowballLanguage) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "Armenian": + case "armenian": *s = Armenian - case "Basque": + case "basque": *s = Basque - case "Catalan": + case "catalan": *s = Catalan - case "Danish": + case "danish": *s = Danish - case "Dutch": + case "dutch": *s = Dutch - case "English": + case "english": *s = English - case "Finnish": + case "finnish": *s = Finnish - case "French": + case "french": *s = French - case "German": + case "german": *s = German - case "German2": + case "german2": *s = German2 - case "Hungarian": + case "hungarian": *s = Hungarian - case "Italian": + case "italian": *s = Italian - case "Kp": + case "kp": *s = Kp - case "Lovins": + case "lovins": *s = Lovins - case "Norwegian": + case "norwegian": *s = Norwegian - case "Porter": + case "porter": *s = Porter - case "Portuguese": + case "portuguese": *s = Portuguese - case "Romanian": + case "romanian": *s = Romanian - case "Russian": + case "russian": *s = Russian - case "Spanish": + case "spanish": *s = Spanish - case "Swedish": + case "swedish": *s = Swedish - case "Turkish": + case "turkish": *s = Turkish default: *s = SnowballLanguage{string(text)} diff --git 
a/typedapi/types/enums/sortmode/sortmode.go b/typedapi/types/enums/sortmode/sortmode.go index 052d75f531..374805283d 100644 --- a/typedapi/types/enums/sortmode/sortmode.go +++ b/typedapi/types/enums/sortmode/sortmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package sortmode package sortmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/sort.ts#L109-L118 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/sort.ts#L108-L117 type SortMode struct { Name string } diff --git a/typedapi/types/enums/sortorder/sortorder.go b/typedapi/types/enums/sortorder/sortorder.go index 201fdab7f3..076672e2a9 100644 --- a/typedapi/types/enums/sortorder/sortorder.go +++ b/typedapi/types/enums/sortorder/sortorder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package sortorder package sortorder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/sort.ts#L120-L129 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/sort.ts#L119-L128 type SortOrder struct { Name string } diff --git a/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go b/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go index 4d0f227577..fe0f446b58 100644 --- a/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go +++ b/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package sourcefieldmode package sourcefieldmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/meta-fields.ts#L67-L75 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/meta-fields.ts#L67-L75 type SourceFieldMode struct { Name string } diff --git a/typedapi/types/enums/sourcemode/sourcemode.go b/typedapi/types/enums/sourcemode/sourcemode.go new file mode 100644 index 0000000000..ec86d80008 --- /dev/null +++ b/typedapi/types/enums/sourcemode/sourcemode.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package sourcemode +package sourcemode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L509-L513 +type SourceMode struct { + Name string +} + +var ( + Disabled = SourceMode{"disabled"} + + Stored = SourceMode{"stored"} + + Synthetic = SourceMode{"synthetic"} +) + +func (s SourceMode) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SourceMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "disabled": + *s = Disabled + case "stored": + *s = Stored + case "synthetic": + *s = Synthetic + default: + *s = SourceMode{string(text)} + } + + return nil +} + +func (s SourceMode) String() string { + return s.Name +} diff --git a/typedapi/types/enums/sqlformat/sqlformat.go b/typedapi/types/enums/sqlformat/sqlformat.go new file mode 100644 index 0000000000..46e911d453 --- /dev/null +++ b/typedapi/types/enums/sqlformat/sqlformat.go @@ -0,0 +1,77 @@ +// Licensed to 
Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package sqlformat +package sqlformat + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/sql/query/QuerySqlRequest.ts#L154-L162 +type SqlFormat struct { + Name string +} + +var ( + Csv = SqlFormat{"csv"} + + Json = SqlFormat{"json"} + + Tsv = SqlFormat{"tsv"} + + Txt = SqlFormat{"txt"} + + Yaml = SqlFormat{"yaml"} + + Cbor = SqlFormat{"cbor"} + + Smile = SqlFormat{"smile"} +) + +func (s SqlFormat) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SqlFormat) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "csv": + *s = Csv + case "json": + *s = Json + case "tsv": + *s = Tsv + case "txt": + *s = Txt + case "yaml": + *s = Yaml + case "cbor": + *s = Cbor + case "smile": + *s = Smile + default: + *s = SqlFormat{string(text)} + } + + return nil +} + +func (s SqlFormat) String() string { + return s.Name +} diff --git 
a/typedapi/types/enums/statslevel/statslevel.go b/typedapi/types/enums/statslevel/statslevel.go index 2d9475236c..e81882ed3b 100644 --- a/typedapi/types/enums/statslevel/statslevel.go +++ b/typedapi/types/enums/statslevel/statslevel.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package statslevel package statslevel import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/searchable_snapshots/_types/stats.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/searchable_snapshots/_types/stats.ts#L20-L24 type StatsLevel struct { Name string } diff --git a/typedapi/types/enums/storagetype/storagetype.go b/typedapi/types/enums/storagetype/storagetype.go index 3981f57f18..e2eaf592ae 100644 --- a/typedapi/types/enums/storagetype/storagetype.go +++ b/typedapi/types/enums/storagetype/storagetype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package storagetype package storagetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L520-L548 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L545-L573 type StorageType struct { Name string } diff --git a/typedapi/types/enums/stringdistance/stringdistance.go b/typedapi/types/enums/stringdistance/stringdistance.go index 6a5e1fb411..c7e89f5bfc 100644 --- a/typedapi/types/enums/stringdistance/stringdistance.go +++ b/typedapi/types/enums/stringdistance/stringdistance.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package stringdistance package stringdistance import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L472-L493 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L472-L493 type StringDistance struct { Name string } diff --git a/typedapi/types/enums/subobjects/subobjects.go b/typedapi/types/enums/subobjects/subobjects.go new file mode 100644 index 0000000000..208abf0930 --- /dev/null +++ b/typedapi/types/enums/subobjects/subobjects.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package subobjects +package subobjects + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/TypeMapping.ts#L63-L74 +type Subobjects struct { + Name string +} + +var ( + True = Subobjects{"true"} + + False = Subobjects{"false"} + + Auto = Subobjects{"auto"} +) + +func (s *Subobjects) UnmarshalJSON(data []byte) error { + return s.UnmarshalText(data) +} + +func (s Subobjects) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *Subobjects) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "true": + *s = True + case "false": + *s = False + case "auto": + *s = Auto + default: + *s = Subobjects{string(text)} + } + + return nil +} + +func (s Subobjects) String() string { + return s.Name +} diff --git a/typedapi/types/enums/suggestmode/suggestmode.go b/typedapi/types/enums/suggestmode/suggestmode.go index 01160b6bf0..60ff736884 100644 --- a/typedapi/types/enums/suggestmode/suggestmode.go +++ 
b/typedapi/types/enums/suggestmode/suggestmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package suggestmode package suggestmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L282-L295 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L286-L299 type SuggestMode struct { Name string } diff --git a/typedapi/types/enums/suggestsort/suggestsort.go b/typedapi/types/enums/suggestsort/suggestsort.go index 402d740da0..fd4e419146 100644 --- a/typedapi/types/enums/suggestsort/suggestsort.go +++ b/typedapi/types/enums/suggestsort/suggestsort.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package suggestsort package suggestsort import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L495-L504 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L495-L504 type SuggestSort struct { Name string } diff --git a/typedapi/types/enums/syncjobtriggermethod/syncjobtriggermethod.go b/typedapi/types/enums/syncjobtriggermethod/syncjobtriggermethod.go index bd210c01d5..4e723704fc 100644 --- a/typedapi/types/enums/syncjobtriggermethod/syncjobtriggermethod.go +++ b/typedapi/types/enums/syncjobtriggermethod/syncjobtriggermethod.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package syncjobtriggermethod package syncjobtriggermethod import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/SyncJob.ts#L48-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/SyncJob.ts#L48-L51 type SyncJobTriggerMethod struct { Name string } diff --git a/typedapi/types/enums/syncjobtype/syncjobtype.go b/typedapi/types/enums/syncjobtype/syncjobtype.go index b075c40d10..f3f185e58b 100644 --- a/typedapi/types/enums/syncjobtype/syncjobtype.go +++ b/typedapi/types/enums/syncjobtype/syncjobtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package syncjobtype package syncjobtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/SyncJob.ts#L42-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/SyncJob.ts#L42-L46 type SyncJobType struct { Name string } diff --git a/typedapi/types/enums/syncstatus/syncstatus.go b/typedapi/types/enums/syncstatus/syncstatus.go index 8428b1c7f4..a6e7bfe081 100644 --- a/typedapi/types/enums/syncstatus/syncstatus.go +++ b/typedapi/types/enums/syncstatus/syncstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package syncstatus package syncstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L138-L146 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L138-L146 type SyncStatus struct { Name string } diff --git a/typedapi/types/enums/synonymformat/synonymformat.go b/typedapi/types/enums/synonymformat/synonymformat.go index 839b28e1e5..ae31ac5780 100644 --- a/typedapi/types/enums/synonymformat/synonymformat.go +++ b/typedapi/types/enums/synonymformat/synonymformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package synonymformat package synonymformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L105-L108 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L104-L107 type SynonymFormat struct { Name string } diff --git a/typedapi/types/enums/syntheticsourcekeepenum/syntheticsourcekeepenum.go b/typedapi/types/enums/syntheticsourcekeepenum/syntheticsourcekeepenum.go new file mode 100644 index 0000000000..d106b63954 --- /dev/null +++ b/typedapi/types/enums/syntheticsourcekeepenum/syntheticsourcekeepenum.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package syntheticsourcekeepenum +package syntheticsourcekeepenum + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/Property.ts#L99-L117 +type SyntheticSourceKeepEnum struct { + Name string +} + +var ( + None = SyntheticSourceKeepEnum{"none"} + + Arrays = SyntheticSourceKeepEnum{"arrays"} + + All = SyntheticSourceKeepEnum{"all"} +) + +func (s SyntheticSourceKeepEnum) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SyntheticSourceKeepEnum) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "none": + *s = None + case "arrays": + *s = Arrays + case "all": + *s = All + default: + *s = SyntheticSourceKeepEnum{string(text)} + } + + return nil +} + +func (s SyntheticSourceKeepEnum) String() string { + return s.Name +} diff --git a/typedapi/types/enums/tasktype/tasktype.go b/typedapi/types/enums/tasktype/tasktype.go index 
7dd1d59bd7..90f85c71ef 100644 --- a/typedapi/types/enums/tasktype/tasktype.go +++ b/typedapi/types/enums/tasktype/tasktype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package tasktype package tasktype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/_types/TaskType.ts#L20-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/_types/TaskType.ts#L20-L29 type TaskType struct { Name string } @@ -36,6 +36,8 @@ var ( Rerank = TaskType{"rerank"} Completion = TaskType{"completion"} + + Chatcompletion = TaskType{"chat_completion"} ) func (t TaskType) MarshalText() (text []byte, err error) { @@ -53,6 +55,8 @@ func (t *TaskType) UnmarshalText(text []byte) error { *t = Rerank case "completion": *t = Completion + case "chat_completion": + *t = Chatcompletion default: *t = TaskType{string(text)} } diff --git a/typedapi/types/enums/templateformat/templateformat.go b/typedapi/types/enums/templateformat/templateformat.go index 36d1281a02..d3295881a2 100644 --- a/typedapi/types/enums/templateformat/templateformat.go +++ b/typedapi/types/enums/templateformat/templateformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package templateformat package templateformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/RoleTemplate.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/RoleTemplate.ts#L22-L25 type TemplateFormat struct { Name string } diff --git a/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go b/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go index 2c75574d54..93a1be4f64 100644 --- a/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go +++ b/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package termsaggregationcollectmode package termsaggregationcollectmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L987-L996 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1056-L1065 type TermsAggregationCollectMode struct { Name string } diff --git a/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go b/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go index 8ce4e9544e..8174b96645 100644 --- a/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go +++ b/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package termsaggregationexecutionhint package termsaggregationexecutionhint import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L998-L1003 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1067-L1072 type TermsAggregationExecutionHint struct { Name string } diff --git a/typedapi/types/enums/termvectoroption/termvectoroption.go b/typedapi/types/enums/termvectoroption/termvectoroption.go index 08e57674ef..ca910be2f1 100644 --- a/typedapi/types/enums/termvectoroption/termvectoroption.go +++ b/typedapi/types/enums/termvectoroption/termvectoroption.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package termvectoroption package termvectoroption import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/TermVectorOption.ts#L20-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/TermVectorOption.ts#L20-L28 type TermVectorOption struct { Name string } diff --git a/typedapi/types/enums/textquerytype/textquerytype.go b/typedapi/types/enums/textquerytype/textquerytype.go index a2334191df..a8b73e6be2 100644 --- a/typedapi/types/enums/textquerytype/textquerytype.go +++ b/typedapi/types/enums/textquerytype/textquerytype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package textquerytype package textquerytype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L541-L567 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L559-L585 type TextQueryType struct { Name string } diff --git a/typedapi/types/enums/threadtype/threadtype.go b/typedapi/types/enums/threadtype/threadtype.go index e033c4426b..4372c15d96 100644 --- a/typedapi/types/enums/threadtype/threadtype.go +++ b/typedapi/types/enums/threadtype/threadtype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package threadtype package threadtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L297-L303 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L301-L307 type ThreadType struct { Name string } diff --git a/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go b/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go index 7e8f3e99f1..ebd10fc73e 100644 --- a/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go +++ b/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package timeseriesmetrictype package timeseriesmetrictype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/TimeSeriesMetricType.ts#L20-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/TimeSeriesMetricType.ts#L20-L26 type TimeSeriesMetricType struct { Name string } diff --git a/typedapi/types/enums/timeunit/timeunit.go b/typedapi/types/enums/timeunit/timeunit.go index ebac5dfc4a..24f12876c5 100644 --- a/typedapi/types/enums/timeunit/timeunit.go +++ b/typedapi/types/enums/timeunit/timeunit.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package timeunit package timeunit import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Time.ts#L69-L77 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Time.ts#L69-L77 type TimeUnit struct { Name string } diff --git a/typedapi/types/enums/tokenchar/tokenchar.go b/typedapi/types/enums/tokenchar/tokenchar.go index f01c27a96a..9aa1eab922 100644 --- a/typedapi/types/enums/tokenchar/tokenchar.go +++ b/typedapi/types/enums/tokenchar/tokenchar.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package tokenchar package tokenchar import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L47-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L59-L66 type TokenChar struct { Name string } diff --git a/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go b/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go index d3d5214417..6ab24490fe 100644 --- a/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go +++ b/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package tokenizationtruncate package tokenizationtruncate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L350-L354 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L338-L342 type TokenizationTruncate struct { Name string } diff --git a/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go b/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go index 839e8772c3..af8006b627 100644 --- a/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go +++ b/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package totalhitsrelation package totalhitsrelation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/hits.ts#L100-L105 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/hits.ts#L101-L106 type TotalHitsRelation struct { Name string } diff --git a/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go b/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go index ff68a3fd04..0308c42c20 100644 --- a/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go +++ b/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package trainedmodeltype package trainedmodeltype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L258-L272 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L314-L328 type TrainedModelType struct { Name string } diff --git a/typedapi/types/enums/trainingpriority/trainingpriority.go b/typedapi/types/enums/trainingpriority/trainingpriority.go index 2a8f0b3bfe..57f77c3949 100644 --- a/typedapi/types/enums/trainingpriority/trainingpriority.go +++ b/typedapi/types/enums/trainingpriority/trainingpriority.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package trainingpriority package trainingpriority import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L311-L314 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L364-L367 type TrainingPriority struct { Name string } diff --git a/typedapi/types/enums/translogdurability/translogdurability.go b/typedapi/types/enums/translogdurability/translogdurability.go index 9532ad6699..f9c71e796f 100644 --- a/typedapi/types/enums/translogdurability/translogdurability.go +++ b/typedapi/types/enums/translogdurability/translogdurability.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package translogdurability package translogdurability import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L365-L380 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L379-L394 type TranslogDurability struct { Name string } diff --git a/typedapi/types/enums/ttesttype/ttesttype.go b/typedapi/types/enums/ttesttype/ttesttype.go index 0597e5e052..b8799cd238 100644 --- a/typedapi/types/enums/ttesttype/ttesttype.go +++ b/typedapi/types/enums/ttesttype/ttesttype.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package ttesttype package ttesttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L322-L335 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L331-L344 type TTestType struct { Name string } diff --git a/typedapi/types/enums/type_/type_.go b/typedapi/types/enums/type_/type_.go index 148fa6e1f2..3767bbdf6b 100644 --- a/typedapi/types/enums/type_/type_.go +++ b/typedapi/types/enums/type_/type_.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package type_ package type_ import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/shutdown/_types/types.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/shutdown/_types/types.ts#L20-L24 type Type struct { Name string } diff --git a/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go b/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go index ff94f3a009..61f41aa7b1 100644 --- a/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go +++ b/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package unassignedinformationreason package unassignedinformationreason import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/types.ts#L127-L146 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/types.ts#L138-L157 type UnassignedInformationReason struct { Name string } @@ -67,35 +67,35 @@ func (u UnassignedInformationReason) MarshalText() (text []byte, err error) { func (u *UnassignedInformationReason) UnmarshalText(text []byte) error { switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "INDEX_CREATED": + case "index_created": *u = INDEXCREATED - case "CLUSTER_RECOVERED": + case "cluster_recovered": *u = CLUSTERRECOVERED - case "INDEX_REOPENED": + case "index_reopened": *u = INDEXREOPENED - case "DANGLING_INDEX_IMPORTED": + case "dangling_index_imported": *u = DANGLINGINDEXIMPORTED - case "NEW_INDEX_RESTORED": + case "new_index_restored": *u = NEWINDEXRESTORED - case "EXISTING_INDEX_RESTORED": + case "existing_index_restored": *u = EXISTINGINDEXRESTORED - case "REPLICA_ADDED": + case "replica_added": *u = REPLICAADDED - case "ALLOCATION_FAILED": + case "allocation_failed": *u = ALLOCATIONFAILED - case "NODE_LEFT": + case "node_left": *u = NODELEFT - case "REROUTE_CANCELLED": + case "reroute_cancelled": *u = REROUTECANCELLED - case "REINITIALIZED": + case "reinitialized": *u = REINITIALIZED - case "REALLOCATED_REPLICA": + case "reallocated_replica": *u = REALLOCATEDREPLICA - case "PRIMARY_FAILED": + case "primary_failed": *u = PRIMARYFAILED - case "FORCED_EMPTY_PRIMARY": + case "forced_empty_primary": *u = FORCEDEMPTYPRIMARY - case 
"MANUAL_ALLOCATION": + case "manual_allocation": *u = MANUALALLOCATION default: *u = UnassignedInformationReason{string(text)} diff --git a/typedapi/types/enums/useragentproperty/useragentproperty.go b/typedapi/types/enums/useragentproperty/useragentproperty.go index 0de99fab25..205a0718fe 100644 --- a/typedapi/types/enums/useragentproperty/useragentproperty.go +++ b/typedapi/types/enums/useragentproperty/useragentproperty.go @@ -16,38 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package useragentproperty package useragentproperty import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L266-L277 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L547-L553 type UserAgentProperty struct { Name string } var ( - NAME = UserAgentProperty{"NAME"} + Name = UserAgentProperty{"name"} - MAJOR = UserAgentProperty{"MAJOR"} + Os = UserAgentProperty{"os"} - MINOR = UserAgentProperty{"MINOR"} + Device = UserAgentProperty{"device"} - PATCH = UserAgentProperty{"PATCH"} + Original = UserAgentProperty{"original"} - OS = UserAgentProperty{"OS"} - - OSNAME = UserAgentProperty{"OS_NAME"} - - OSMAJOR = UserAgentProperty{"OS_MAJOR"} - - OSMINOR = UserAgentProperty{"OS_MINOR"} - - DEVICE = UserAgentProperty{"DEVICE"} - - BUILD = UserAgentProperty{"BUILD"} + Version = UserAgentProperty{"version"} ) func (u UserAgentProperty) MarshalText() (text []byte, err error) { @@ -57,26 +47,16 @@ func (u UserAgentProperty) MarshalText() (text []byte, err error) { func (u *UserAgentProperty) UnmarshalText(text []byte) error { switch 
strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { - case "NAME": - *u = NAME - case "MAJOR": - *u = MAJOR - case "MINOR": - *u = MINOR - case "PATCH": - *u = PATCH - case "OS": - *u = OS - case "OS_NAME": - *u = OSNAME - case "OS_MAJOR": - *u = OSMAJOR - case "OS_MINOR": - *u = OSMINOR - case "DEVICE": - *u = DEVICE - case "BUILD": - *u = BUILD + case "name": + *u = Name + case "os": + *u = Os + case "device": + *u = Device + case "original": + *u = Original + case "version": + *u = Version default: *u = UserAgentProperty{string(text)} } diff --git a/typedapi/types/enums/valuetype/valuetype.go b/typedapi/types/enums/valuetype/valuetype.go index 81ba193315..f0c842eb7f 100644 --- a/typedapi/types/enums/valuetype/valuetype.go +++ b/typedapi/types/enums/valuetype/valuetype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package valuetype package valuetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L424-L435 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L436-L447 type ValueType struct { Name string } diff --git a/typedapi/types/enums/versiontype/versiontype.go b/typedapi/types/enums/versiontype/versiontype.go index 6e6a317d54..12834ec8f6 100644 --- a/typedapi/types/enums/versiontype/versiontype.go +++ b/typedapi/types/enums/versiontype/versiontype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package versiontype package versiontype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L106-L122 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L107-L126 type VersionType struct { Name string } diff --git a/typedapi/types/enums/waitforactiveshardoptions/waitforactiveshardoptions.go b/typedapi/types/enums/waitforactiveshardoptions/waitforactiveshardoptions.go index e758ce5eaf..1e3e342971 100644 --- a/typedapi/types/enums/waitforactiveshardoptions/waitforactiveshardoptions.go +++ b/typedapi/types/enums/waitforactiveshardoptions/waitforactiveshardoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package waitforactiveshardoptions package waitforactiveshardoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L305-L309 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L309-L313 type WaitForActiveShardOptions struct { Name string } diff --git a/typedapi/types/enums/waitforevents/waitforevents.go b/typedapi/types/enums/waitforevents/waitforevents.go index 0ef4932725..2052072847 100644 --- a/typedapi/types/enums/waitforevents/waitforevents.go +++ b/typedapi/types/enums/waitforevents/waitforevents.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package waitforevents package waitforevents import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L311-L318 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L315-L322 type WaitForEvents struct { Name string } diff --git a/typedapi/types/enums/watchermetric/watchermetric.go b/typedapi/types/enums/watchermetric/watchermetric.go index 5d0b89a5db..a2ee342f5d 100644 --- a/typedapi/types/enums/watchermetric/watchermetric.go +++ b/typedapi/types/enums/watchermetric/watchermetric.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package watchermetric package watchermetric import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/stats/types.ts#L42-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/stats/types.ts#L63-L69 type WatcherMetric struct { Name string } diff --git a/typedapi/types/enums/watcherstate/watcherstate.go b/typedapi/types/enums/watcherstate/watcherstate.go index b4dcbceb87..a9187645b1 100644 --- a/typedapi/types/enums/watcherstate/watcherstate.go +++ b/typedapi/types/enums/watcherstate/watcherstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package watcherstate package watcherstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/stats/types.ts#L26-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/stats/types.ts#L26-L31 type WatcherState struct { Name string } diff --git a/typedapi/types/enums/watsonxtasktype/watsonxtasktype.go b/typedapi/types/enums/watsonxtasktype/watsonxtasktype.go new file mode 100644 index 0000000000..1b5c99cc05 --- /dev/null +++ b/typedapi/types/enums/watsonxtasktype/watsonxtasktype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package watsonxtasktype +package watsonxtasktype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/put_watsonx/PutWatsonxRequest.ts#L72-L74 +type WatsonxTaskType struct { + Name string +} + +var ( + Textembedding = WatsonxTaskType{"text_embedding"} +) + +func (w WatsonxTaskType) MarshalText() (text []byte, err error) { + return []byte(w.String()), nil +} + +func (w *WatsonxTaskType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "text_embedding": + *w = Textembedding + default: + *w = WatsonxTaskType{string(text)} + } + + return nil +} + +func (w WatsonxTaskType) String() string { + return w.Name +} diff --git a/typedapi/types/enums/xpackcategory/xpackcategory.go b/typedapi/types/enums/xpackcategory/xpackcategory.go new file mode 100644 index 0000000000..14c651ff4a --- /dev/null +++ b/typedapi/types/enums/xpackcategory/xpackcategory.go @@ -0,0 +1,61 @@ +// Licensed 
to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +// Package xpackcategory +package xpackcategory + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/info/XPackInfoRequest.ts#L58-L62 +type XPackCategory struct { + Name string +} + +var ( + Build = XPackCategory{"build"} + + Features = XPackCategory{"features"} + + License = XPackCategory{"license"} +) + +func (x XPackCategory) MarshalText() (text []byte, err error) { + return []byte(x.String()), nil +} + +func (x *XPackCategory) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "build": + *x = Build + case "features": + *x = Features + case "license": + *x = License + default: + *x = XPackCategory{string(text)} + } + + return nil +} + +func (x XPackCategory) String() string { + return x.Name +} diff --git a/typedapi/types/enums/zerotermsquery/zerotermsquery.go b/typedapi/types/enums/zerotermsquery/zerotermsquery.go index 9608a32b96..11ce1dc410 100644 --- 
a/typedapi/types/enums/zerotermsquery/zerotermsquery.go +++ b/typedapi/types/enums/zerotermsquery/zerotermsquery.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d // Package zerotermsquery package zerotermsquery import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L569-L578 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L587-L596 type ZeroTermsQuery struct { Name string } diff --git a/typedapi/types/epochtimeunitmillis.go b/typedapi/types/epochtimeunitmillis.go index aa85fabf36..8f775f6169 100644 --- a/typedapi/types/epochtimeunitmillis.go +++ b/typedapi/types/epochtimeunitmillis.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // EpochTimeUnitMillis type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Time.ts#L40-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Time.ts#L40-L40 type EpochTimeUnitMillis int64 + +type EpochTimeUnitMillisVariant interface { + EpochTimeUnitMillisCaster() *EpochTimeUnitMillis +} diff --git a/typedapi/types/epochtimeunitseconds.go b/typedapi/types/epochtimeunitseconds.go index d424c74655..490d76673a 100644 --- a/typedapi/types/epochtimeunitseconds.go +++ b/typedapi/types/epochtimeunitseconds.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // EpochTimeUnitSeconds type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Time.ts#L40-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Time.ts#L40-L40 type EpochTimeUnitSeconds int64 + +type EpochTimeUnitSecondsVariant interface { + EpochTimeUnitSecondsCaster() *EpochTimeUnitSeconds +} diff --git a/typedapi/types/eql.go b/typedapi/types/eql.go index d3fcadfdd6..ab42f8c83b 100644 --- a/typedapi/types/eql.go +++ b/typedapi/types/eql.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Eql type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L351-L354 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L361-L364 type Eql struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -103,8 +103,10 @@ func (s *Eql) UnmarshalJSON(data []byte) error { // NewEql returns a Eql. func NewEql() *Eql { r := &Eql{ - Queries: make(map[string]XpackQuery, 0), + Queries: make(map[string]XpackQuery), } return r } + +// false diff --git a/typedapi/types/eqlfeatures.go b/typedapi/types/eqlfeatures.go index 9279082127..075207ec0b 100644 --- a/typedapi/types/eqlfeatures.go +++ b/typedapi/types/eqlfeatures.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // EqlFeatures type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L99-L107 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L96-L104 type EqlFeatures struct { Event uint `json:"event"` Join uint `json:"join"` @@ -39,3 +39,5 @@ func NewEqlFeatures() *EqlFeatures { return r } + +// false diff --git a/typedapi/types/eqlfeaturesjoin.go b/typedapi/types/eqlfeaturesjoin.go index 0e280ba84f..66e63b3477 100644 --- a/typedapi/types/eqlfeaturesjoin.go +++ b/typedapi/types/eqlfeaturesjoin.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // EqlFeaturesJoin type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L109-L115 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L106-L112 type EqlFeaturesJoin struct { JoinQueriesFiveOrMore uint `json:"join_queries_five_or_more"` JoinQueriesFour uint `json:"join_queries_four"` @@ -37,3 +37,5 @@ func NewEqlFeaturesJoin() *EqlFeaturesJoin { return r } + +// false diff --git a/typedapi/types/eqlfeatureskeys.go b/typedapi/types/eqlfeatureskeys.go index ad3c332ae0..619a4e3f68 100644 --- a/typedapi/types/eqlfeatureskeys.go +++ b/typedapi/types/eqlfeatureskeys.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // EqlFeaturesKeys type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L117-L123 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L114-L120 type EqlFeaturesKeys struct { JoinKeysFiveOrMore uint `json:"join_keys_five_or_more"` JoinKeysFour uint `json:"join_keys_four"` @@ -37,3 +37,5 @@ func NewEqlFeaturesKeys() *EqlFeaturesKeys { return r } + +// false diff --git a/typedapi/types/eqlfeaturespipes.go b/typedapi/types/eqlfeaturespipes.go index ac520b5c35..eb92df029d 100644 --- a/typedapi/types/eqlfeaturespipes.go +++ b/typedapi/types/eqlfeaturespipes.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // EqlFeaturesPipes type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L125-L128 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L122-L125 type EqlFeaturesPipes struct { PipeHead uint `json:"pipe_head"` PipeTail uint `json:"pipe_tail"` @@ -34,3 +34,5 @@ func NewEqlFeaturesPipes() *EqlFeaturesPipes { return r } + +// false diff --git a/typedapi/types/eqlfeaturessequences.go b/typedapi/types/eqlfeaturessequences.go index ef91e4fcb6..e89e442ad8 100644 --- a/typedapi/types/eqlfeaturessequences.go +++ b/typedapi/types/eqlfeaturessequences.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // EqlFeaturesSequences type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L130-L137 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L127-L134 type EqlFeaturesSequences struct { SequenceMaxspan uint `json:"sequence_maxspan"` SequenceQueriesFiveOrMore uint `json:"sequence_queries_five_or_more"` @@ -38,3 +38,5 @@ func NewEqlFeaturesSequences() *EqlFeaturesSequences { return r } + +// false diff --git a/typedapi/types/eqlhits.go b/typedapi/types/eqlhits.go index d3ecf59135..5689fcb174 100644 --- a/typedapi/types/eqlhits.go +++ b/typedapi/types/eqlhits.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // EqlHits type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/eql/_types/EqlHits.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/eql/_types/EqlHits.ts#L25-L39 type EqlHits struct { // Events Contains events matching the query. Each object represents a matching event. 
Events []HitsEvent `json:"events,omitempty"` @@ -40,3 +40,5 @@ func NewEqlHits() *EqlHits { return r } + +// false diff --git a/typedapi/types/errorcause.go b/typedapi/types/errorcause.go index 0af7ec5927..1562dc92f3 100644 --- a/typedapi/types/errorcause.go +++ b/typedapi/types/errorcause.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,11 +31,11 @@ import ( // ErrorCause type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Errors.ts#L25-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Errors.ts#L25-L50 type ErrorCause struct { CausedBy *ErrorCause `json:"caused_by,omitempty"` Metadata map[string]json.RawMessage `json:"-"` - // Reason A human-readable explanation of the error, in english + // Reason A human-readable explanation of the error, in English. Reason *string `json:"reason,omitempty"` RootCause []ErrorCause `json:"root_cause,omitempty"` // StackTrace The server stack trace. Present only if the `error_trace=true` parameter was @@ -168,8 +168,10 @@ func (s ErrorCause) MarshalJSON() ([]byte, error) { // NewErrorCause returns a ErrorCause. func NewErrorCause() *ErrorCause { r := &ErrorCause{ - Metadata: make(map[string]json.RawMessage, 0), + Metadata: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/errorresponsebase.go b/typedapi/types/errorresponsebase.go index 548e7c5ef5..0a16b50940 100644 --- a/typedapi/types/errorresponsebase.go +++ b/typedapi/types/errorresponsebase.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ErrorResponseBase type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Base.ts#L76-L85 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Base.ts#L127-L136 type ErrorResponseBase struct { Error ErrorCause `json:"error"` Status int `json:"status"` @@ -84,3 +84,5 @@ func NewErrorResponseBase() *ErrorResponseBase { return r } + +// false diff --git a/typedapi/types/esqlcolumns.go b/typedapi/types/esqlresult.go similarity index 75% rename from typedapi/types/esqlcolumns.go rename to typedapi/types/esqlresult.go index 4112561fb8..67a650fcac 100644 --- a/typedapi/types/esqlcolumns.go +++ b/typedapi/types/esqlresult.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types -// EsqlColumns type alias. +// EsqlResult type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Binary.ts#L24-L24 -type EsqlColumns []byte +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Binary.ts#L24-L24 +type EsqlResult []byte diff --git a/typedapi/types/estoniananalyzer.go b/typedapi/types/estoniananalyzer.go new file mode 100644 index 0000000000..815ca331e0 --- /dev/null +++ b/typedapi/types/estoniananalyzer.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// EstonianAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L139-L143 +type EstonianAnalyzer struct { + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *EstonianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s EstonianAnalyzer) MarshalJSON() ([]byte, error) { + type innerEstonianAnalyzer EstonianAnalyzer + tmp := innerEstonianAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "estonian" + + return json.Marshal(tmp) +} + +// NewEstonianAnalyzer returns a EstonianAnalyzer. 
+func NewEstonianAnalyzer() *EstonianAnalyzer { + r := &EstonianAnalyzer{} + + return r +} + +// true + +type EstonianAnalyzerVariant interface { + EstonianAnalyzerCaster() *EstonianAnalyzer +} + +func (s *EstonianAnalyzer) EstonianAnalyzerCaster() *EstonianAnalyzer { + return s +} diff --git a/typedapi/types/eventdatastream.go b/typedapi/types/eventdatastream.go index 1f64fdc8d9..ef348927d3 100644 --- a/typedapi/types/eventdatastream.go +++ b/typedapi/types/eventdatastream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // EventDataStream type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/_types/BehavioralAnalytics.ts#L29-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/_types/BehavioralAnalytics.ts#L29-L31 type EventDataStream struct { Name string `json:"name"` } @@ -66,3 +66,5 @@ func NewEventDataStream() *EventDataStream { return r } + +// false diff --git a/typedapi/types/ewmamodelsettings.go b/typedapi/types/ewmamodelsettings.go index 6eee6f70e4..1df6950056 100644 --- a/typedapi/types/ewmamodelsettings.go +++ b/typedapi/types/ewmamodelsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // EwmaModelSettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L267-L269 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L293-L295 type EwmaModelSettings struct { Alpha *float32 `json:"alpha,omitempty"` } @@ -78,3 +78,13 @@ func NewEwmaModelSettings() *EwmaModelSettings { return r } + +// true + +type EwmaModelSettingsVariant interface { + EwmaModelSettingsCaster() *EwmaModelSettings +} + +func (s *EwmaModelSettings) EwmaModelSettingsCaster() *EwmaModelSettings { + return s +} diff --git a/typedapi/types/ewmamovingaverageaggregation.go b/typedapi/types/ewmamovingaverageaggregation.go index eda0e17872..8777344884 100644 --- a/typedapi/types/ewmamovingaverageaggregation.go +++ b/typedapi/types/ewmamovingaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // EwmaMovingAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L252-L255 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L278-L281 type EwmaMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -173,3 +173,13 @@ func NewEwmaMovingAverageAggregation() *EwmaMovingAverageAggregation { return r } + +// true + +type EwmaMovingAverageAggregationVariant interface { + EwmaMovingAverageAggregationCaster() *EwmaMovingAverageAggregation +} + +func (s *EwmaMovingAverageAggregation) EwmaMovingAverageAggregationCaster() *EwmaMovingAverageAggregation { + return s +} diff --git a/typedapi/types/executeenrichpolicystatus.go b/typedapi/types/executeenrichpolicystatus.go index 05716ca284..f8b5614ae6 100644 --- a/typedapi/types/executeenrichpolicystatus.go +++ b/typedapi/types/executeenrichpolicystatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // ExecuteEnrichPolicyStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/execute_policy/types.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/execute_policy/types.ts#L20-L22 type ExecuteEnrichPolicyStatus struct { Phase enrichpolicyphase.EnrichPolicyPhase `json:"phase"` } @@ -37,3 +37,5 @@ func NewExecuteEnrichPolicyStatus() *ExecuteEnrichPolicyStatus { return r } + +// false diff --git a/typedapi/types/executingpolicy.go b/typedapi/types/executingpolicy.go index fd658c54ca..1ce8a801fc 100644 --- a/typedapi/types/executingpolicy.go +++ b/typedapi/types/executingpolicy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ExecutingPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/stats/types.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/stats/types.ts#L25-L28 type ExecutingPolicy struct { Name string `json:"name"` Task TaskInfo `json:"task"` @@ -72,3 +72,5 @@ func NewExecutingPolicy() *ExecutingPolicy { return r } + +// false diff --git a/typedapi/types/executionresult.go b/typedapi/types/executionresult.go index e160ccb0ba..0702f24e05 100644 --- a/typedapi/types/executionresult.go +++ b/typedapi/types/executionresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ExecutionResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Execution.ts#L60-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Execution.ts#L60-L66 type ExecutionResult struct { Actions []ExecutionResultAction `json:"actions"` Condition ExecutionResultCondition `json:"condition"` @@ -90,3 +90,5 @@ func NewExecutionResult() *ExecutionResult { return r } + +// false diff --git a/typedapi/types/executionresultaction.go b/typedapi/types/executionresultaction.go index e2827159aa..15c053fc56 100644 --- a/typedapi/types/executionresultaction.go +++ b/typedapi/types/executionresultaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // ExecutionResultAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Execution.ts#L74-L86 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Execution.ts#L74-L86 type ExecutionResultAction struct { Email *EmailResult `json:"email,omitempty"` Error *ErrorCause `json:"error,omitempty"` @@ -137,3 +137,5 @@ func NewExecutionResultAction() *ExecutionResultAction { return r } + +// false diff --git a/typedapi/types/executionresultcondition.go b/typedapi/types/executionresultcondition.go index a373d38bb8..bda7760e7d 100644 --- a/typedapi/types/executionresultcondition.go +++ b/typedapi/types/executionresultcondition.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // ExecutionResultCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Execution.ts#L68-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Execution.ts#L68-L72 type ExecutionResultCondition struct { Met bool `json:"met"` Status actionstatusoptions.ActionStatusOptions `json:"status"` @@ -91,3 +91,5 @@ func NewExecutionResultCondition() *ExecutionResultCondition { return r } + +// false diff --git a/typedapi/types/executionresultinput.go b/typedapi/types/executionresultinput.go index 4b6e2e4d31..403a6f49f9 100644 --- a/typedapi/types/executionresultinput.go +++ b/typedapi/types/executionresultinput.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,7 +29,7 @@ import ( // ExecutionResultInput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Execution.ts#L88-L92 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Execution.ts#L88-L92 type ExecutionResultInput struct { Payload map[string]json.RawMessage `json:"payload"` Status actionstatusoptions.ActionStatusOptions `json:"status"` @@ -39,8 +39,10 @@ type ExecutionResultInput struct { // NewExecutionResultInput returns a ExecutionResultInput. func NewExecutionResultInput() *ExecutionResultInput { r := &ExecutionResultInput{ - Payload: make(map[string]json.RawMessage, 0), + Payload: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/executionstate.go b/typedapi/types/executionstate.go index df45c8ff58..cab2c4d447 100644 --- a/typedapi/types/executionstate.go +++ b/typedapi/types/executionstate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ExecutionState type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Action.ts#L120-L124 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Action.ts#L114-L118 type ExecutionState struct { Reason *string `json:"reason,omitempty"` Successful bool `json:"successful"` @@ -95,3 +95,13 @@ func NewExecutionState() *ExecutionState { return r } + +// true + +type ExecutionStateVariant interface { + ExecutionStateCaster() *ExecutionState +} + +func (s *ExecutionState) ExecutionStateCaster() *ExecutionState { + return s +} diff --git a/typedapi/types/executionthreadpool.go b/typedapi/types/executionthreadpool.go index d9f463b775..c5ba684068 100644 --- a/typedapi/types/executionthreadpool.go +++ b/typedapi/types/executionthreadpool.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,9 +31,12 @@ import ( // ExecutionThreadPool type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Execution.ts#L94-L97 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Execution.ts#L94-L103 type ExecutionThreadPool struct { - MaxSize int64 `json:"max_size"` + // MaxSize The largest size of the execution thread pool, which indicates the largest + // number of concurrent running watches. + MaxSize int64 `json:"max_size"` + // QueueSize The number of watches that were triggered and are currently queued. 
QueueSize int64 `json:"queue_size"` } @@ -93,3 +96,5 @@ func NewExecutionThreadPool() *ExecutionThreadPool { return r } + +// false diff --git a/typedapi/types/existsquery.go b/typedapi/types/existsquery.go index aada53340b..25324432ea 100644 --- a/typedapi/types/existsquery.go +++ b/typedapi/types/existsquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ExistsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L37-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L38-L46 type ExistsQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -103,3 +103,13 @@ func NewExistsQuery() *ExistsQuery { return r } + +// true + +type ExistsQueryVariant interface { + ExistsQueryCaster() *ExistsQuery +} + +func (s *ExistsQuery) ExistsQueryCaster() *ExistsQuery { + return s +} diff --git a/typedapi/types/expandwildcards.go b/typedapi/types/expandwildcards.go index 324dca7bac..1bbba97c0e 100644 --- a/typedapi/types/expandwildcards.go +++ b/typedapi/types/expandwildcards.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,9 @@ import ( // ExpandWildcards type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L217-L217 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L221-L221 type ExpandWildcards []expandwildcard.ExpandWildcard + +type ExpandWildcardsVariant interface { + ExpandWildcardsCaster() *ExpandWildcards +} diff --git a/typedapi/types/explainanalyzetoken.go b/typedapi/types/explainanalyzetoken.go index ea2bd1a2b6..47ffea3a5d 100644 --- a/typedapi/types/explainanalyzetoken.go +++ b/typedapi/types/explainanalyzetoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ExplainAnalyzeToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/analyze/types.ts#L52-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/analyze/types.ts#L52-L67 type ExplainAnalyzeToken struct { Bytes string `json:"bytes"` EndOffset int64 `json:"end_offset"` @@ -235,8 +235,10 @@ func (s ExplainAnalyzeToken) MarshalJSON() ([]byte, error) { // NewExplainAnalyzeToken returns a ExplainAnalyzeToken. 
func NewExplainAnalyzeToken() *ExplainAnalyzeToken { r := &ExplainAnalyzeToken{ - ExplainAnalyzeToken: make(map[string]json.RawMessage, 0), + ExplainAnalyzeToken: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/explanation.go b/typedapi/types/explanation.go index ebc11c829b..b7222107eb 100644 --- a/typedapi/types/explanation.go +++ b/typedapi/types/explanation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Explanation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/explain/types.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/explain/types.ts#L22-L26 type Explanation struct { Description string `json:"description"` Details []ExplanationDetail `json:"details"` @@ -97,3 +97,5 @@ func NewExplanation() *Explanation { return r } + +// false diff --git a/typedapi/types/explanationdetail.go b/typedapi/types/explanationdetail.go index cdd7f1fa6c..91b5f21086 100644 --- a/typedapi/types/explanationdetail.go +++ b/typedapi/types/explanationdetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ExplanationDetail type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/explain/types.ts#L28-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/explain/types.ts#L28-L32 type ExplanationDetail struct { Description string `json:"description"` Details []ExplanationDetail `json:"details,omitempty"` @@ -97,3 +97,5 @@ func NewExplanationDetail() *ExplanationDetail { return r } + +// false diff --git a/typedapi/types/explorecontrols.go b/typedapi/types/explorecontrols.go index a45b4b4be8..8156e54e54 100644 --- a/typedapi/types/explorecontrols.go +++ b/typedapi/types/explorecontrols.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ExploreControls type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/graph/_types/ExploreControls.ts#L24-L49 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/graph/_types/ExploreControls.ts#L24-L49 type ExploreControls struct { // SampleDiversity To avoid the top-matching documents sample being dominated by a single source // of results, it is sometimes necessary to request diversity in the sample. 
@@ -123,3 +123,13 @@ func NewExploreControls() *ExploreControls { return r } + +// true + +type ExploreControlsVariant interface { + ExploreControlsCaster() *ExploreControls +} + +func (s *ExploreControls) ExploreControlsCaster() *ExploreControls { + return s +} diff --git a/typedapi/types/exponentialaveragecalculationcontext.go b/typedapi/types/exponentialaveragecalculationcontext.go new file mode 100644 index 0000000000..46ec11e114 --- /dev/null +++ b/typedapi/types/exponentialaveragecalculationcontext.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ExponentialAverageCalculationContext type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Datafeed.ts#L204-L208 +type ExponentialAverageCalculationContext struct { + IncrementalMetricValueMs Float64 `json:"incremental_metric_value_ms"` + LatestTimestamp *int64 `json:"latest_timestamp,omitempty"` + PreviousExponentialAverageMs Float64 `json:"previous_exponential_average_ms,omitempty"` +} + +func (s *ExponentialAverageCalculationContext) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "incremental_metric_value_ms": + if err := dec.Decode(&s.IncrementalMetricValueMs); err != nil { + return fmt.Errorf("%s | %w", "IncrementalMetricValueMs", err) + } + + case "latest_timestamp": + if err := dec.Decode(&s.LatestTimestamp); err != nil { + return fmt.Errorf("%s | %w", "LatestTimestamp", err) + } + + case "previous_exponential_average_ms": + if err := dec.Decode(&s.PreviousExponentialAverageMs); err != nil { + return fmt.Errorf("%s | %w", "PreviousExponentialAverageMs", err) + } + + } + } + return nil +} + +// NewExponentialAverageCalculationContext returns a ExponentialAverageCalculationContext. +func NewExponentialAverageCalculationContext() *ExponentialAverageCalculationContext { + r := &ExponentialAverageCalculationContext{} + + return r +} + +// false diff --git a/typedapi/types/extendedboundsdouble.go b/typedapi/types/extendedboundsdouble.go index 8951eb8c62..6513e2a37e 100644 --- a/typedapi/types/extendedboundsdouble.go +++ b/typedapi/types/extendedboundsdouble.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ExtendedBoundsdouble type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L491-L500 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L508-L517 type ExtendedBoundsdouble struct { // Max Maximum value for the bound. Max *Float64 `json:"max,omitempty"` @@ -97,3 +97,13 @@ func NewExtendedBoundsdouble() *ExtendedBoundsdouble { return r } + +// true + +type ExtendedBoundsdoubleVariant interface { + ExtendedBoundsdoubleCaster() *ExtendedBoundsdouble +} + +func (s *ExtendedBoundsdouble) ExtendedBoundsdoubleCaster() *ExtendedBoundsdouble { + return s +} diff --git a/typedapi/types/extendedboundsfielddatemath.go b/typedapi/types/extendedboundsfielddatemath.go index 67444c9601..26e190ee48 100644 --- a/typedapi/types/extendedboundsfielddatemath.go +++ b/typedapi/types/extendedboundsfielddatemath.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ExtendedBoundsFieldDateMath type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L491-L500 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L508-L517 type ExtendedBoundsFieldDateMath struct { // Max Maximum value for the bound. Max FieldDateMath `json:"max,omitempty"` @@ -74,3 +74,13 @@ func NewExtendedBoundsFieldDateMath() *ExtendedBoundsFieldDateMath { return r } + +// true + +type ExtendedBoundsFieldDateMathVariant interface { + ExtendedBoundsFieldDateMathCaster() *ExtendedBoundsFieldDateMath +} + +func (s *ExtendedBoundsFieldDateMath) ExtendedBoundsFieldDateMathCaster() *ExtendedBoundsFieldDateMath { + return s +} diff --git a/typedapi/types/extendedmemorystats.go b/typedapi/types/extendedmemorystats.go index a3b729bcd7..e7125f8387 100644 --- a/typedapi/types/extendedmemorystats.go +++ b/typedapi/types/extendedmemorystats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ExtendedMemoryStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L622-L631 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L658-L667 type ExtendedMemoryStats struct { // AdjustedTotalInBytes If the amount of physical memory has been overridden using the // `es`.`total_memory_bytes` system property then this reports the overridden @@ -255,3 +255,5 @@ func NewExtendedMemoryStats() *ExtendedMemoryStats { return r } + +// false diff --git a/typedapi/types/extendedstatsaggregate.go b/typedapi/types/extendedstatsaggregate.go index 1a606b165b..6cfe75c1aa 100644 --- a/typedapi/types/extendedstatsaggregate.go +++ b/typedapi/types/extendedstatsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ExtendedStatsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L278-L296 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L299-L320 type ExtendedStatsAggregate struct { Avg *Float64 `json:"avg,omitempty"` AvgAsString *string `json:"avg_as_string,omitempty"` @@ -289,3 +289,5 @@ func NewExtendedStatsAggregate() *ExtendedStatsAggregate { return r } + +// false diff --git a/typedapi/types/extendedstatsaggregation.go b/typedapi/types/extendedstatsaggregation.go index aa0c0488ca..2315ef75d9 100644 --- a/typedapi/types/extendedstatsaggregation.go +++ b/typedapi/types/extendedstatsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ExtendedStatsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L101-L106 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L101-L106 type ExtendedStatsAggregation struct { // Field The field on which to run the aggregation. 
Field *string `json:"field,omitempty"` @@ -113,3 +113,13 @@ func NewExtendedStatsAggregation() *ExtendedStatsAggregation { return r } + +// true + +type ExtendedStatsAggregationVariant interface { + ExtendedStatsAggregationCaster() *ExtendedStatsAggregation +} + +func (s *ExtendedStatsAggregation) ExtendedStatsAggregationCaster() *ExtendedStatsAggregation { + return s +} diff --git a/typedapi/types/extendedstatsbucketaggregate.go b/typedapi/types/extendedstatsbucketaggregate.go index 661f6dd762..0652e3a1be 100644 --- a/typedapi/types/extendedstatsbucketaggregate.go +++ b/typedapi/types/extendedstatsbucketaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ExtendedStatsBucketAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L298-L299 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L322-L323 type ExtendedStatsBucketAggregate struct { Avg *Float64 `json:"avg,omitempty"` AvgAsString *string `json:"avg_as_string,omitempty"` @@ -289,3 +289,5 @@ func NewExtendedStatsBucketAggregate() *ExtendedStatsBucketAggregate { return r } + +// false diff --git a/typedapi/types/extendedstatsbucketaggregation.go b/typedapi/types/extendedstatsbucketaggregation.go index b524523648..a382ca1f85 100644 --- a/typedapi/types/extendedstatsbucketaggregation.go +++ b/typedapi/types/extendedstatsbucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // ExtendedStatsBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L198-L203 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L218-L223 type ExtendedStatsBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -111,3 +111,13 @@ func NewExtendedStatsBucketAggregation() *ExtendedStatsBucketAggregation { return r } + +// true + +type ExtendedStatsBucketAggregationVariant interface { + ExtendedStatsBucketAggregationCaster() *ExtendedStatsBucketAggregation +} + +func (s *ExtendedStatsBucketAggregation) ExtendedStatsBucketAggregationCaster() *ExtendedStatsBucketAggregation { + return s +} diff --git a/typedapi/types/failprocessor.go b/typedapi/types/failprocessor.go index aa85c4aaca..f29e092673 100644 --- a/typedapi/types/failprocessor.go +++ b/typedapi/types/failprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FailProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L648-L654 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L888-L894 type FailProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -143,3 +143,13 @@ func NewFailProcessor() *FailProcessor { return r } + +// true + +type FailProcessorVariant interface { + FailProcessorCaster() *FailProcessor +} + +func (s *FailProcessor) FailProcessorCaster() *FailProcessor { + return s +} diff --git a/typedapi/types/failurestore.go b/typedapi/types/failurestore.go new file mode 100644 index 0000000000..b411f75b8c --- /dev/null +++ b/typedapi/types/failurestore.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FailureStore type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/DataStream.ts#L39-L43 +type FailureStore struct { + Enabled bool `json:"enabled"` + Indices []DataStreamIndex `json:"indices"` + RolloverOnWrite bool `json:"rollover_on_write"` +} + +func (s *FailureStore) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "rollover_on_write": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RolloverOnWrite", err) + } + s.RolloverOnWrite = value + case bool: + s.RolloverOnWrite = v + } + + } + } + return nil +} + +// NewFailureStore returns a FailureStore. +func NewFailureStore() *FailureStore { + r := &FailureStore{} + + return r +} + +// false diff --git a/typedapi/types/feature.go b/typedapi/types/feature.go index cad70da81c..a10a386969 100644 --- a/typedapi/types/feature.go +++ b/typedapi/types/feature.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Feature type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/features/_types/Feature.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/features/_types/Feature.ts#L20-L23 type Feature struct { Description string `json:"description"` Name string `json:"name"` @@ -87,3 +87,5 @@ func NewFeature() *Feature { return r } + +// false diff --git a/typedapi/types/featureenabled.go b/typedapi/types/featureenabled.go index 0f776f9c26..efa6732970 100644 --- a/typedapi/types/featureenabled.go +++ b/typedapi/types/featureenabled.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FeatureEnabled type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L215-L217 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L215-L217 type FeatureEnabled struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,13 @@ func NewFeatureEnabled() *FeatureEnabled { return r } + +// true + +type FeatureEnabledVariant interface { + FeatureEnabledCaster() *FeatureEnabled +} + +func (s *FeatureEnabled) FeatureEnabledCaster() *FeatureEnabled { + return s +} diff --git a/typedapi/types/features.go b/typedapi/types/features.go index 64e3e9f59b..056d936257 100644 --- a/typedapi/types/features.go +++ b/typedapi/types/features.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,5 @@ import ( // Features type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get/IndicesGetRequest.ts#L96-L96 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get/IndicesGetRequest.ts#L103-L103 type Features []feature.Feature diff --git a/typedapi/types/featuretoggle.go b/typedapi/types/featuretoggle.go index f59bd3f573..faa3151357 100644 --- a/typedapi/types/featuretoggle.go +++ b/typedapi/types/featuretoggle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FeatureToggle type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L40-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L37-L39 type FeatureToggle struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,5 @@ func NewFeatureToggle() *FeatureToggle { return r } + +// false diff --git a/typedapi/types/fetchprofile.go b/typedapi/types/fetchprofile.go index 4895a9eb5a..f1ce8ac953 100644 --- a/typedapi/types/fetchprofile.go +++ b/typedapi/types/fetchprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FetchProfile type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/profile.ts#L139-L146 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L230-L237 type FetchProfile struct { Breakdown FetchProfileBreakdown `json:"breakdown"` Children []FetchProfile `json:"children,omitempty"` @@ -111,3 +111,5 @@ func NewFetchProfile() *FetchProfile { return r } + +// false diff --git a/typedapi/types/fetchprofilebreakdown.go b/typedapi/types/fetchprofilebreakdown.go index a629a82870..cdd0d19a7f 100644 --- a/typedapi/types/fetchprofilebreakdown.go +++ b/typedapi/types/fetchprofilebreakdown.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FetchProfileBreakdown type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/profile.ts#L148-L157 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L239-L248 type FetchProfileBreakdown struct { LoadSource *int `json:"load_source,omitempty"` LoadSourceCount *int `json:"load_source_count,omitempty"` @@ -197,3 +197,5 @@ func NewFetchProfileBreakdown() *FetchProfileBreakdown { return r } + +// false diff --git a/typedapi/types/fetchprofiledebug.go b/typedapi/types/fetchprofiledebug.go index 5e425d74ea..235fae875f 100644 --- a/typedapi/types/fetchprofiledebug.go +++ b/typedapi/types/fetchprofiledebug.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FetchProfileDebug type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/profile.ts#L159-L162 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L250-L253 type FetchProfileDebug struct { FastPath *int `json:"fast_path,omitempty"` StoredFields []string `json:"stored_fields,omitempty"` @@ -84,3 +84,5 @@ func NewFetchProfileDebug() *FetchProfileDebug { return r } + +// false diff --git a/typedapi/types/fieldaliasproperty.go b/typedapi/types/fieldaliasproperty.go index 8b0325fb36..f700b7158e 100644 --- a/typedapi/types/fieldaliasproperty.go +++ b/typedapi/types/fieldaliasproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,20 +29,22 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // FieldAliasProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/specialized.ts#L55-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/specialized.ts#L64-L67 type FieldAliasProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Path *string `json:"path,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Path *string `json:"path,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { @@ -84,301 +86,313 @@ func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -432,306 +446,323 @@ func (s *FieldAliasProperty) UnmarshalJSON(data []byte) 
error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := 
NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -746,13 +777,14 @@ func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { func (s FieldAliasProperty) MarshalJSON() ([]byte, error) { type innerFieldAliasProperty FieldAliasProperty tmp := innerFieldAliasProperty{ - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Path: s.Path, - Properties: s.Properties, - Type: s.Type, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Path: s.Path, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "alias" @@ 
-763,10 +795,20 @@ func (s FieldAliasProperty) MarshalJSON() ([]byte, error) { // NewFieldAliasProperty returns a FieldAliasProperty. func NewFieldAliasProperty() *FieldAliasProperty { r := &FieldAliasProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type FieldAliasPropertyVariant interface { + FieldAliasPropertyCaster() *FieldAliasProperty +} + +func (s *FieldAliasProperty) FieldAliasPropertyCaster() *FieldAliasProperty { + return s +} diff --git a/typedapi/types/fieldandformat.go b/typedapi/types/fieldandformat.go index 980819b71e..cc38dbf334 100644 --- a/typedapi/types/fieldandformat.go +++ b/typedapi/types/fieldandformat.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,12 +31,12 @@ import ( // FieldAndFormat type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/abstractions.ts#L525-L539 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/abstractions.ts#L535-L549 type FieldAndFormat struct { - // Field Wildcard pattern. The request returns values for field names matching this + // Field A wildcard pattern. The request returns values for field names matching this // pattern. Field string `json:"field"` - // Format Format in which the values are returned. + // Format The format in which the values are returned. 
Format *string `json:"format,omitempty"` IncludeUnmapped *bool `json:"include_unmapped,omitempty"` } @@ -110,3 +110,13 @@ func NewFieldAndFormat() *FieldAndFormat { return r } + +// true + +type FieldAndFormatVariant interface { + FieldAndFormatCaster() *FieldAndFormat +} + +func (s *FieldAndFormat) FieldAndFormatCaster() *FieldAndFormat { + return s +} diff --git a/typedapi/types/fieldcapability.go b/typedapi/types/fieldcapability.go index 06c7fe5a4a..86bb10134f 100644 --- a/typedapi/types/fieldcapability.go +++ b/typedapi/types/fieldcapability.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // FieldCapability type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/field_caps/types.ts#L23-L81 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/field_caps/types.ts#L23-L81 type FieldCapability struct { // Aggregatable Whether this field can be aggregated on all indices. Aggregatable bool `json:"aggregatable"` @@ -231,3 +231,5 @@ func NewFieldCapability() *FieldCapability { return r } + +// false diff --git a/typedapi/types/fieldcollapse.go b/typedapi/types/fieldcollapse.go index 33e2a633f4..c300290629 100644 --- a/typedapi/types/fieldcollapse.go +++ b/typedapi/types/fieldcollapse.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FieldCollapse type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/FieldCollapse.ts#L24-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/FieldCollapse.ts#L24-L41 type FieldCollapse struct { Collapse *FieldCollapse `json:"collapse,omitempty"` // Field The field to collapse the result set on @@ -111,3 +111,13 @@ func NewFieldCollapse() *FieldCollapse { return r } + +// true + +type FieldCollapseVariant interface { + FieldCollapseCaster() *FieldCollapse +} + +func (s *FieldCollapse) FieldCollapseCaster() *FieldCollapse { + return s +} diff --git a/typedapi/types/fielddatafrequencyfilter.go b/typedapi/types/fielddatafrequencyfilter.go index e62b9189d9..907524f1f1 100644 --- a/typedapi/types/fielddatafrequencyfilter.go +++ b/typedapi/types/fielddatafrequencyfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FielddataFrequencyFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/FielddataFrequencyFilter.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/FielddataFrequencyFilter.ts#L22-L26 type FielddataFrequencyFilter struct { Max Float64 `json:"max"` Min Float64 `json:"min"` @@ -112,3 +112,13 @@ func NewFielddataFrequencyFilter() *FielddataFrequencyFilter { return r } + +// true + +type FielddataFrequencyFilterVariant interface { + FielddataFrequencyFilterCaster() *FielddataFrequencyFilter +} + +func (s *FielddataFrequencyFilter) FielddataFrequencyFilterCaster() *FielddataFrequencyFilter { + return s +} diff --git a/typedapi/types/fielddatarecord.go b/typedapi/types/fielddatarecord.go index c94a6a6ade..28375daf41 100644 --- a/typedapi/types/fielddatarecord.go +++ b/typedapi/types/fielddatarecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FielddataRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/fielddata/types.ts#L20-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/fielddata/types.ts#L20-L48 type FielddataRecord struct { // Field field name Field *string `json:"field,omitempty"` @@ -145,3 +145,5 @@ func NewFielddataRecord() *FielddataRecord { return r } + +// false diff --git a/typedapi/types/fielddatastats.go b/typedapi/types/fielddatastats.go index e49906bfb8..ce38d58ed4 100644 --- a/typedapi/types/fielddatastats.go +++ b/typedapi/types/fielddatastats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FielddataStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L111-L116 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L114-L119 type FielddataStats struct { Evictions *int64 `json:"evictions,omitempty"` Fields map[string]FieldMemoryUsage `json:"fields,omitempty"` @@ -105,8 +105,10 @@ func (s *FielddataStats) UnmarshalJSON(data []byte) error { // NewFielddataStats returns a FielddataStats. 
func NewFielddataStats() *FielddataStats { r := &FielddataStats{ - Fields: make(map[string]FieldMemoryUsage, 0), + Fields: make(map[string]FieldMemoryUsage), } return r } + +// false diff --git a/typedapi/types/fielddatemath.go b/typedapi/types/fielddatemath.go index cf8bcc85d3..83f53af4d2 100644 --- a/typedapi/types/fielddatemath.go +++ b/typedapi/types/fielddatemath.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // Float64 // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L298-L305 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L309-L316 type FieldDateMath any + +type FieldDateMathVariant interface { + FieldDateMathCaster() *FieldDateMath +} diff --git a/typedapi/types/fieldlookup.go b/typedapi/types/fieldlookup.go index f32439d19a..6a8ffac754 100644 --- a/typedapi/types/fieldlookup.go +++ b/typedapi/types/fieldlookup.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // FieldLookup type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/abstractions.ts#L429-L446 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/abstractions.ts#L436-L453 type FieldLookup struct { // Id `id` of the document. Id string `json:"id"` @@ -88,3 +88,13 @@ func NewFieldLookup() *FieldLookup { return r } + +// true + +type FieldLookupVariant interface { + FieldLookupCaster() *FieldLookup +} + +func (s *FieldLookup) FieldLookupCaster() *FieldLookup { + return s +} diff --git a/typedapi/types/fieldmapping.go b/typedapi/types/fieldmapping.go index 34e620a134..0376a3caf5 100644 --- a/typedapi/types/fieldmapping.go +++ b/typedapi/types/fieldmapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FieldMapping type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/meta-fields.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/meta-fields.ts#L24-L27 type FieldMapping struct { FullName string `json:"full_name"` Mapping map[string]Property `json:"mapping"` @@ -83,301 +83,313 @@ func (s *FieldMapping) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } 
s.Mapping[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "date_range": oo := 
NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Mapping | %w", err) } s.Mapping[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Mapping[key] = oo } @@ -391,8 +403,10 @@ func (s *FieldMapping) UnmarshalJSON(data []byte) error { // NewFieldMapping returns a FieldMapping. func NewFieldMapping() *FieldMapping { r := &FieldMapping{ - Mapping: make(map[string]Property, 0), + Mapping: make(map[string]Property), } return r } + +// false diff --git a/typedapi/types/fieldmemoryusage.go b/typedapi/types/fieldmemoryusage.go index c0ac59a956..37054bdd91 100644 --- a/typedapi/types/fieldmemoryusage.go +++ b/typedapi/types/fieldmemoryusage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FieldMemoryUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L118-L121 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L121-L124 type FieldMemoryUsage struct { MemorySize ByteSize `json:"memory_size,omitempty"` MemorySizeInBytes int64 `json:"memory_size_in_bytes"` @@ -83,3 +83,5 @@ func NewFieldMemoryUsage() *FieldMemoryUsage { return r } + +// false diff --git a/typedapi/types/fieldmetric.go b/typedapi/types/fieldmetric.go index 1af6176855..311b9d5321 100644 --- a/typedapi/types/fieldmetric.go +++ b/typedapi/types/fieldmetric.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // FieldMetric type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/_types/Metric.ts#L30-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/_types/Metric.ts#L30-L35 type FieldMetric struct { // Field The field to collect metrics for. This must be a numeric of some kind. 
Field string `json:"field"` @@ -77,3 +77,13 @@ func NewFieldMetric() *FieldMetric { return r } + +// true + +type FieldMetricVariant interface { + FieldMetricCaster() *FieldMetric +} + +func (s *FieldMetric) FieldMetricCaster() *FieldMetric { + return s +} diff --git a/typedapi/types/fieldnamesfield.go b/typedapi/types/fieldnamesfield.go index 9c590e137a..e4a31cedfd 100644 --- a/typedapi/types/fieldnamesfield.go +++ b/typedapi/types/fieldnamesfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FieldNamesField type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/meta-fields.ts#L42-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/meta-fields.ts#L42-L44 type FieldNamesField struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,13 @@ func NewFieldNamesField() *FieldNamesField { return r } + +// true + +type FieldNamesFieldVariant interface { + FieldNamesFieldCaster() *FieldNamesField +} + +func (s *FieldNamesField) FieldNamesFieldCaster() *FieldNamesField { + return s +} diff --git a/typedapi/types/fieldrule.go b/typedapi/types/fieldrule.go index f10ff247a3..e07e6b37aa 100644 --- a/typedapi/types/fieldrule.go +++ b/typedapi/types/fieldrule.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,11 +30,12 @@ import ( // FieldRule type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/RoleMappingRule.ts#L36-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/RoleMappingRule.ts#L35-L43 type FieldRule struct { - Dn []string `json:"dn,omitempty"` - Groups []string `json:"groups,omitempty"` - Username []string `json:"username,omitempty"` + AdditionalFieldRuleProperty map[string]json.RawMessage `json:"-"` + Dn []string `json:"dn,omitempty"` + Groups []string `json:"groups,omitempty"` + Username []string `json:"username,omitempty"` } func (s *FieldRule) UnmarshalJSON(data []byte) error { @@ -100,14 +101,68 @@ func (s *FieldRule) UnmarshalJSON(data []byte) error { } } + default: + + if key, ok := t.(string); ok { + if s.AdditionalFieldRuleProperty == nil { + s.AdditionalFieldRuleProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalFieldRuleProperty", err) + } + s.AdditionalFieldRuleProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s FieldRule) MarshalJSON() ([]byte, error) { + type opt FieldRule + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalFieldRuleProperty { 
+ tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalFieldRuleProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewFieldRule returns a FieldRule. func NewFieldRule() *FieldRule { - r := &FieldRule{} + r := &FieldRule{ + AdditionalFieldRuleProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type FieldRuleVariant interface { + FieldRuleCaster() *FieldRule +} + +func (s *FieldRule) FieldRuleCaster() *FieldRule { + return s +} diff --git a/typedapi/types/fields.go b/typedapi/types/fields.go index ed1c75a5f4..b9ce5b2d28 100644 --- a/typedapi/types/fields.go +++ b/typedapi/types/fields.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Fields type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L140-L140 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L144-L144 type Fields []string + +type FieldsVariant interface { + FieldsCaster() *Fields +} diff --git a/typedapi/types/fieldsecurity.go b/typedapi/types/fieldsecurity.go index 66cc1cafe3..28a1aadbfa 100644 --- a/typedapi/types/fieldsecurity.go +++ b/typedapi/types/fieldsecurity.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // FieldSecurity type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/FieldSecurity.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/FieldSecurity.ts#L22-L25 type FieldSecurity struct { Except []string `json:"except,omitempty"` Grant []string `json:"grant,omitempty"` @@ -94,3 +94,13 @@ func NewFieldSecurity() *FieldSecurity { return r } + +// true + +type FieldSecurityVariant interface { + FieldSecurityCaster() *FieldSecurity +} + +func (s *FieldSecurity) FieldSecurityCaster() *FieldSecurity { + return s +} diff --git a/typedapi/types/fieldsizeusage.go b/typedapi/types/fieldsizeusage.go index 05140a305e..ddc9c70001 100644 --- a/typedapi/types/fieldsizeusage.go +++ b/typedapi/types/fieldsizeusage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FieldSizeUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L92-L95 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L95-L98 type FieldSizeUsage struct { Size ByteSize `json:"size,omitempty"` SizeInBytes int64 `json:"size_in_bytes"` @@ -83,3 +83,5 @@ func NewFieldSizeUsage() *FieldSizeUsage { return r } + +// false diff --git a/typedapi/types/fieldsort.go b/typedapi/types/fieldsort.go index 8be94ba723..21d8f6763e 100644 --- a/typedapi/types/fieldsort.go +++ b/typedapi/types/fieldsort.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -36,7 +36,7 @@ import ( // FieldSort type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/sort.ts#L44-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/sort.ts#L43-L52 type FieldSort struct { Format *string `json:"format,omitempty"` Missing Missing `json:"missing,omitempty"` @@ -120,3 +120,13 @@ func NewFieldSort() *FieldSort { return r } + +// true + +type FieldSortVariant interface { + FieldSortCaster() *FieldSort +} + +func (s *FieldSort) FieldSortCaster() *FieldSort { + return s +} diff --git a/typedapi/types/fieldstat.go b/typedapi/types/fieldstat.go index 08ff3323b0..7ce56ff27a 100644 --- a/typedapi/types/fieldstat.go +++ b/typedapi/types/fieldstat.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FieldStat type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/text_structure/find_structure/types.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/text_structure/_types/Structure.ts#L23-L33 type FieldStat struct { Cardinality int `json:"cardinality"` Count int `json:"count"` @@ -195,3 +195,5 @@ func NewFieldStat() *FieldStat { return r } + +// false diff --git a/typedapi/types/fieldstatistics.go b/typedapi/types/fieldstatistics.go index b83a81b22c..90ccc559be 100644 --- a/typedapi/types/fieldstatistics.go +++ b/typedapi/types/fieldstatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FieldStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/termvectors/types.ts#L28-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/termvectors/types.ts#L28-L32 type FieldStatistics struct { DocCount int `json:"doc_count"` SumDocFreq int64 `json:"sum_doc_freq"` @@ -110,3 +110,5 @@ func NewFieldStatistics() *FieldStatistics { return r } + +// false diff --git a/typedapi/types/fieldsuggester.go b/typedapi/types/fieldsuggester.go index 54781df19e..8cdea3170f 100644 --- a/typedapi/types/fieldsuggester.go +++ b/typedapi/types/fieldsuggester.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,8 +31,9 @@ import ( // FieldSuggester type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L109-L142 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L109-L142 type FieldSuggester struct { + AdditionalFieldSuggesterProperty map[string]json.RawMessage `json:"-"` // Completion Provides auto-complete/search-as-you-type functionality. Completion *CompletionSuggester `json:"completion,omitempty"` // Phrase Provides access to word alternatives on a per token basis within a certain @@ -115,14 +116,68 @@ func (s *FieldSuggester) UnmarshalJSON(data []byte) error { } s.Text = &o + default: + + if key, ok := t.(string); ok { + if s.AdditionalFieldSuggesterProperty == nil { + s.AdditionalFieldSuggesterProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalFieldSuggesterProperty", err) + } + s.AdditionalFieldSuggesterProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s FieldSuggester) MarshalJSON() ([]byte, error) { + type opt FieldSuggester + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalFieldSuggesterProperty { + tmp[fmt.Sprintf("%s", 
key)] = value + } + delete(tmp, "AdditionalFieldSuggesterProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewFieldSuggester returns a FieldSuggester. func NewFieldSuggester() *FieldSuggester { - r := &FieldSuggester{} + r := &FieldSuggester{ + AdditionalFieldSuggesterProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type FieldSuggesterVariant interface { + FieldSuggesterCaster() *FieldSuggester +} + +func (s *FieldSuggester) FieldSuggesterCaster() *FieldSuggester { + return s +} diff --git a/typedapi/types/fieldsummary.go b/typedapi/types/fieldsummary.go index 5487fa9505..4f6ff23a0d 100644 --- a/typedapi/types/fieldsummary.go +++ b/typedapi/types/fieldsummary.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // FieldSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L57-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L57-L66 type FieldSummary struct { Any uint `json:"any"` DocValues uint `json:"doc_values"` @@ -40,3 +40,5 @@ func NewFieldSummary() *FieldSummary { return r } + +// false diff --git a/typedapi/types/fieldsusagebody.go b/typedapi/types/fieldsusagebody.go deleted file mode 100644 index 5c9153c800..0000000000 --- a/typedapi/types/fieldsusagebody.go +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "encoding/json" - "fmt" -) - -// FieldsUsageBody type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L32-L39 -type FieldsUsageBody struct { - FieldsUsageBody map[string]UsageStatsIndex `json:"-"` - Shards_ ShardStatistics `json:"_shards"` -} - -// MarhsalJSON overrides marshalling for types with additional properties -func (s FieldsUsageBody) MarshalJSON() ([]byte, error) { - type opt FieldsUsageBody - // We transform the struct to a map without the embedded additional properties map - tmp := make(map[string]any, 0) - - data, err := json.Marshal(opt(s)) - if err != nil { - return nil, err - } - err = json.Unmarshal(data, &tmp) - if err != nil { - return nil, err - } - - // We inline the additional fields from the underlying map - for key, value := range s.FieldsUsageBody { - tmp[fmt.Sprintf("%s", key)] = value - } - delete(tmp, "FieldsUsageBody") - - data, err = json.Marshal(tmp) - if err != nil { - return nil, err - } - - 
return data, nil -} - -// NewFieldsUsageBody returns a FieldsUsageBody. -func NewFieldsUsageBody() *FieldsUsageBody { - r := &FieldsUsageBody{ - FieldsUsageBody: make(map[string]UsageStatsIndex, 0), - } - - return r -} diff --git a/typedapi/types/fieldtypes.go b/typedapi/types/fieldtypes.go index e1366e222e..00d1bb6b12 100644 --- a/typedapi/types/fieldtypes.go +++ b/typedapi/types/fieldtypes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FieldTypes type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L136-L167 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L136-L167 type FieldTypes struct { // Count The number of occurrences of the field type in selected nodes. Count int `json:"count"` @@ -176,3 +176,5 @@ func NewFieldTypes() *FieldTypes { return r } + +// false diff --git a/typedapi/types/fieldtypesmappings.go b/typedapi/types/fieldtypesmappings.go index eb01ab2a48..c1339e5c80 100644 --- a/typedapi/types/fieldtypesmappings.go +++ b/typedapi/types/fieldtypesmappings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FieldTypesMappings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L109-L134 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L109-L134 type FieldTypesMappings struct { // FieldTypes Contains statistics about field data types used in selected nodes. FieldTypes []FieldTypes `json:"field_types"` @@ -136,3 +136,5 @@ func NewFieldTypesMappings() *FieldTypesMappings { return r } + +// false diff --git a/typedapi/types/fieldvalue.go b/typedapi/types/fieldvalue.go index 71e412852b..f907f56caa 100644 --- a/typedapi/types/fieldvalue.go +++ b/typedapi/types/fieldvalue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,5 +29,9 @@ package types // nil // json.RawMessage // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L25-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L25-L37 type FieldValue any + +type FieldValueVariant interface { + FieldValueCaster() *FieldValue +} diff --git a/typedapi/types/fieldvaluefactorscorefunction.go b/typedapi/types/fieldvaluefactorscorefunction.go index 5aac134dba..5e8b26b7bb 100644 --- a/typedapi/types/fieldvaluefactorscorefunction.go +++ b/typedapi/types/fieldvaluefactorscorefunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // FieldValueFactorScoreFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L136-L155 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L149-L168 type FieldValueFactorScoreFunction struct { // Factor Optional factor to multiply the field value with. Factor *Float64 `json:"factor,omitempty"` @@ -115,3 +115,13 @@ func NewFieldValueFactorScoreFunction() *FieldValueFactorScoreFunction { return r } + +// true + +type FieldValueFactorScoreFunctionVariant interface { + FieldValueFactorScoreFunctionCaster() *FieldValueFactorScoreFunction +} + +func (s *FieldValueFactorScoreFunction) FieldValueFactorScoreFunctionCaster() *FieldValueFactorScoreFunction { + return s +} diff --git a/typedapi/types/filecountsnapshotstats.go b/typedapi/types/filecountsnapshotstats.go index e1aed2cdd7..6d3b066ccf 100644 --- a/typedapi/types/filecountsnapshotstats.go +++ b/typedapi/types/filecountsnapshotstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FileCountSnapshotStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/FileCountSnapshotStats.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/FileCountSnapshotStats.ts#L22-L25 type FileCountSnapshotStats struct { FileCount int `json:"file_count"` SizeInBytes int64 `json:"size_in_bytes"` @@ -94,3 +94,5 @@ func NewFileCountSnapshotStats() *FileCountSnapshotStats { return r } + +// false diff --git a/typedapi/types/filedetails.go b/typedapi/types/filedetails.go index de8a556532..c00d4667f2 100644 --- a/typedapi/types/filedetails.go +++ b/typedapi/types/filedetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FileDetails type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/recovery/types.ts#L50-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/recovery/types.ts#L50-L54 type FileDetails struct { Length int64 `json:"length"` Name string `json:"name"` @@ -106,3 +106,5 @@ func NewFileDetails() *FileDetails { return r } + +// false diff --git a/typedapi/types/filesystem.go b/typedapi/types/filesystem.go index 42419416e1..c9ecf1a9a7 100644 --- a/typedapi/types/filesystem.go +++ b/typedapi/types/filesystem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FileSystem type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L698-L716 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L769-L787 type FileSystem struct { // Data List of all file stores. Data []DataPathStats `json:"data,omitempty"` @@ -100,3 +100,5 @@ func NewFileSystem() *FileSystem { return r } + +// false diff --git a/typedapi/types/filesystemtotal.go b/typedapi/types/filesystemtotal.go index caf16b7bfc..142d1b8a06 100644 --- a/typedapi/types/filesystemtotal.go +++ b/typedapi/types/filesystemtotal.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FileSystemTotal type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L757-L786 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L828-L857 type FileSystemTotal struct { // Available Total disk space available to this Java virtual machine on all file stores. 
// Depending on OS or process level restrictions, this might appear less than @@ -163,3 +163,5 @@ func NewFileSystemTotal() *FileSystemTotal { return r } + +// false diff --git a/typedapi/types/fillmaskinferenceoptions.go b/typedapi/types/fillmaskinferenceoptions.go index 624ba9f92d..b3f7f749de 100644 --- a/typedapi/types/fillmaskinferenceoptions.go +++ b/typedapi/types/fillmaskinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FillMaskInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L266-L280 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L253-L268 type FillMaskInferenceOptions struct { // MaskToken The string/token which will be removed from incoming documents and replaced // with the inference prediction(s). 
@@ -50,6 +50,7 @@ type FillMaskInferenceOptions struct { ResultsField *string `json:"results_field,omitempty"` // Tokenization The tokenization options to update when inferring Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` + Vocabulary Vocabulary `json:"vocabulary"` } func (s *FillMaskInferenceOptions) UnmarshalJSON(data []byte) error { @@ -112,6 +113,11 @@ func (s *FillMaskInferenceOptions) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Tokenization", err) } + case "vocabulary": + if err := dec.Decode(&s.Vocabulary); err != nil { + return fmt.Errorf("%s | %w", "Vocabulary", err) + } + } } return nil @@ -123,3 +129,13 @@ func NewFillMaskInferenceOptions() *FillMaskInferenceOptions { return r } + +// true + +type FillMaskInferenceOptionsVariant interface { + FillMaskInferenceOptionsCaster() *FillMaskInferenceOptions +} + +func (s *FillMaskInferenceOptions) FillMaskInferenceOptionsCaster() *FillMaskInferenceOptions { + return s +} diff --git a/typedapi/types/fillmaskinferenceupdateoptions.go b/typedapi/types/fillmaskinferenceupdateoptions.go index 7b10344aa1..6a05eb1d73 100644 --- a/typedapi/types/fillmaskinferenceupdateoptions.go +++ b/typedapi/types/fillmaskinferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FillMaskInferenceUpdateOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L411-L418 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L399-L406 type FillMaskInferenceUpdateOptions struct { // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0. NumTopClasses *int `json:"num_top_classes,omitempty"` @@ -101,3 +101,13 @@ func NewFillMaskInferenceUpdateOptions() *FillMaskInferenceUpdateOptions { return r } + +// true + +type FillMaskInferenceUpdateOptionsVariant interface { + FillMaskInferenceUpdateOptionsCaster() *FillMaskInferenceUpdateOptions +} + +func (s *FillMaskInferenceUpdateOptions) FillMaskInferenceUpdateOptionsCaster() *FillMaskInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/filteraggregate.go b/typedapi/types/filteraggregate.go index e0f4db99ae..f83c01ac42 100644 --- a/typedapi/types/filteraggregate.go +++ b/typedapi/types/filteraggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // FilterAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L499-L500 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L552-L556 type FilterAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -491,6 +491,13 @@ func (s *FilterAggregate) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -624,8 +631,10 @@ func (s FilterAggregate) MarshalJSON() ([]byte, error) { // NewFilterAggregate returns a FilterAggregate. func NewFilterAggregate() *FilterAggregate { r := &FilterAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/filteringadvancedsnippet.go b/typedapi/types/filteringadvancedsnippet.go index fe86222482..b5cddcae0c 100644 --- a/typedapi/types/filteringadvancedsnippet.go +++ b/typedapi/types/filteringadvancedsnippet.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,11 +30,11 @@ import ( // FilteringAdvancedSnippet type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L192-L196 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L192-L196 type FilteringAdvancedSnippet struct { - CreatedAt DateTime `json:"created_at,omitempty"` - UpdatedAt DateTime `json:"updated_at,omitempty"` - Value map[string]json.RawMessage `json:"value"` + CreatedAt DateTime `json:"created_at,omitempty"` + UpdatedAt DateTime `json:"updated_at,omitempty"` + Value json.RawMessage `json:"value,omitempty"` } func (s *FilteringAdvancedSnippet) UnmarshalJSON(data []byte) error { @@ -63,9 +63,6 @@ func (s *FilteringAdvancedSnippet) UnmarshalJSON(data []byte) error { } case "value": - if s.Value == nil { - s.Value = make(map[string]json.RawMessage, 0) - } if err := dec.Decode(&s.Value); err != nil { return fmt.Errorf("%s | %w", "Value", err) } @@ -77,9 +74,17 @@ func (s *FilteringAdvancedSnippet) UnmarshalJSON(data []byte) error { // NewFilteringAdvancedSnippet returns a FilteringAdvancedSnippet. func NewFilteringAdvancedSnippet() *FilteringAdvancedSnippet { - r := &FilteringAdvancedSnippet{ - Value: make(map[string]json.RawMessage, 0), - } + r := &FilteringAdvancedSnippet{} return r } + +// true + +type FilteringAdvancedSnippetVariant interface { + FilteringAdvancedSnippetCaster() *FilteringAdvancedSnippet +} + +func (s *FilteringAdvancedSnippet) FilteringAdvancedSnippetCaster() *FilteringAdvancedSnippet { + return s +} diff --git a/typedapi/types/filteringconfig.go b/typedapi/types/filteringconfig.go index abc9c6befa..bdea2d46ff 100644 --- a/typedapi/types/filteringconfig.go +++ b/typedapi/types/filteringconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FilteringConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L209-L213 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L209-L213 type FilteringConfig struct { Active FilteringRules `json:"active"` Domain *string `json:"domain,omitempty"` @@ -86,3 +86,13 @@ func NewFilteringConfig() *FilteringConfig { return r } + +// true + +type FilteringConfigVariant interface { + FilteringConfigCaster() *FilteringConfig +} + +func (s *FilteringConfig) FilteringConfigCaster() *FilteringConfig { + return s +} diff --git a/typedapi/types/filteringrule.go b/typedapi/types/filteringrule.go index 6484e233e3..e76724e0fa 100644 --- a/typedapi/types/filteringrule.go +++ b/typedapi/types/filteringrule.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // FilteringRule type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L170-L179 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L170-L179 type FilteringRule struct { CreatedAt DateTime `json:"created_at,omitempty"` Field string `json:"field"` @@ -130,3 +130,13 @@ func NewFilteringRule() *FilteringRule { return r } + +// true + +type FilteringRuleVariant interface { + FilteringRuleCaster() *FilteringRule +} + +func (s *FilteringRule) FilteringRuleCaster() *FilteringRule { + return s +} diff --git a/typedapi/types/filteringrules.go b/typedapi/types/filteringrules.go index 4ab744343f..010818c97c 100644 --- a/typedapi/types/filteringrules.go +++ b/typedapi/types/filteringrules.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // FilteringRules type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L203-L207 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L203-L207 type FilteringRules struct { AdvancedSnippet FilteringAdvancedSnippet `json:"advanced_snippet"` Rules []FilteringRule `json:"rules"` @@ -35,3 +35,13 @@ func NewFilteringRules() *FilteringRules { return r } + +// true + +type FilteringRulesVariant interface { + FilteringRulesCaster() *FilteringRules +} + +func (s *FilteringRules) FilteringRulesCaster() *FilteringRules { + return s +} diff --git a/typedapi/types/filteringrulesvalidation.go b/typedapi/types/filteringrulesvalidation.go index a4ef477914..e145b702ef 100644 --- a/typedapi/types/filteringrulesvalidation.go +++ b/typedapi/types/filteringrulesvalidation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // FilteringRulesValidation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L198-L201 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L198-L201 type FilteringRulesValidation struct { Errors []FilteringValidation `json:"errors"` State filteringvalidationstate.FilteringValidationState `json:"state"` @@ -38,3 +38,13 @@ func NewFilteringRulesValidation() *FilteringRulesValidation { return r } + +// true + +type FilteringRulesValidationVariant interface { + FilteringRulesValidationCaster() *FilteringRulesValidation +} + +func (s *FilteringRulesValidation) FilteringRulesValidationCaster() *FilteringRulesValidation { + return s +} diff --git a/typedapi/types/filteringvalidation.go b/typedapi/types/filteringvalidation.go index 0e9374221a..4b87b792e3 100644 --- a/typedapi/types/filteringvalidation.go +++ b/typedapi/types/filteringvalidation.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // FilteringValidation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L181-L184 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L181-L184 type FilteringValidation struct { Ids []string `json:"ids"` Messages []string `json:"messages"` @@ -34,3 +34,13 @@ func NewFilteringValidation() *FilteringValidation { return r } + +// true + +type FilteringValidationVariant interface { + FilteringValidationCaster() *FilteringValidation +} + +func (s *FilteringValidation) FilteringValidationCaster() *FilteringValidation { + return s +} diff --git a/typedapi/types/filterref.go b/typedapi/types/filterref.go index ce150bd578..bd09202ead 100644 --- a/typedapi/types/filterref.go +++ b/typedapi/types/filterref.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // FilterRef type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Filter.ts#L31-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Filter.ts#L31-L41 type FilterRef struct { // FilterId The identifier for the filter. 
FilterId string `json:"filter_id"` @@ -77,3 +77,13 @@ func NewFilterRef() *FilterRef { return r } + +// true + +type FilterRefVariant interface { + FilterRefCaster() *FilterRef +} + +func (s *FilterRef) FilterRefCaster() *FilterRef { + return s +} diff --git a/typedapi/types/filtersaggregate.go b/typedapi/types/filtersaggregate.go index 179b043daf..cd801a75f9 100644 --- a/typedapi/types/filtersaggregate.go +++ b/typedapi/types/filtersaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // FiltersAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L572-L573 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L639-L643 type FiltersAggregate struct { Buckets BucketsFiltersBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewFiltersAggregate() *FiltersAggregate { return r } + +// false diff --git a/typedapi/types/filtersaggregation.go b/typedapi/types/filtersaggregation.go index 3d7d88aad0..bfc7eac3d7 100644 --- a/typedapi/types/filtersaggregation.go +++ b/typedapi/types/filtersaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FiltersAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L360-L380 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L374-L394 type FiltersAggregation struct { // Filters Collection of queries from which to build buckets. Filters BucketsQuery `json:"filters,omitempty"` @@ -132,3 +132,13 @@ func NewFiltersAggregation() *FiltersAggregation { return r } + +// true + +type FiltersAggregationVariant interface { + FiltersAggregationCaster() *FiltersAggregation +} + +func (s *FiltersAggregation) FiltersAggregationCaster() *FiltersAggregation { + return s +} diff --git a/typedapi/types/filtersbucket.go b/typedapi/types/filtersbucket.go index 65a6aef0c4..eb437b94a2 100644 --- a/typedapi/types/filtersbucket.go +++ b/typedapi/types/filtersbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // FiltersBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L575-L575 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L645-L645 type FiltersBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -485,6 +485,13 @@ func (s *FiltersBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -618,8 +625,10 @@ func (s FiltersBucket) MarshalJSON() ([]byte, error) { // NewFiltersBucket returns a FiltersBucket. func NewFiltersBucket() *FiltersBucket { r := &FiltersBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/fingerprintanalyzer.go b/typedapi/types/fingerprintanalyzer.go index 82c664407d..1f57072fa3 100644 --- a/typedapi/types/fingerprintanalyzer.go +++ b/typedapi/types/fingerprintanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FingerprintAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/analyzers.ts#L37-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L37-L45 type FingerprintAnalyzer struct { MaxOutputSize int `json:"max_output_size"` PreserveOriginal bool `json:"preserve_original"` @@ -166,3 +166,13 @@ func NewFingerprintAnalyzer() *FingerprintAnalyzer { return r } + +// true + +type FingerprintAnalyzerVariant interface { + FingerprintAnalyzerCaster() *FingerprintAnalyzer +} + +func (s *FingerprintAnalyzer) FingerprintAnalyzerCaster() *FingerprintAnalyzer { + return s +} diff --git a/typedapi/types/fingerprintprocessor.go b/typedapi/types/fingerprintprocessor.go new file mode 100644 index 0000000000..909c2f854a --- /dev/null +++ b/typedapi/types/fingerprintprocessor.go @@ -0,0 +1,208 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fingerprintdigest" +) + +// FingerprintProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L904-L932 +type FingerprintProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Fields Array of fields to include in the fingerprint. For objects, the processor + // hashes both the field key and value. For other fields, the processor hashes + // only the field value. + Fields []string `json:"fields"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If true, the processor ignores any missing fields. If all fields are + // missing, the processor silently exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // Method The hash method used to compute the fingerprint. Must be one of MD5, SHA-1, + // SHA-256, SHA-512, or MurmurHash3. + Method *fingerprintdigest.FingerprintDigest `json:"method,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Salt Salt value for the hash function. + Salt *string `json:"salt,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField Output field for the fingerprint. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *FingerprintProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "method": + if err := dec.Decode(&s.Method); err != nil { + return fmt.Errorf("%s | %w", "Method", err) + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != 
nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "salt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Salt", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Salt = &o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewFingerprintProcessor returns a FingerprintProcessor. +func NewFingerprintProcessor() *FingerprintProcessor { + r := &FingerprintProcessor{} + + return r +} + +// true + +type FingerprintProcessorVariant interface { + FingerprintProcessorCaster() *FingerprintProcessor +} + +func (s *FingerprintProcessor) FingerprintProcessorCaster() *FingerprintProcessor { + return s +} diff --git a/typedapi/types/fingerprinttokenfilter.go b/typedapi/types/fingerprinttokenfilter.go index 8396ae10ab..37f243042e 100644 --- a/typedapi/types/fingerprinttokenfilter.go +++ b/typedapi/types/fingerprinttokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FingerprintTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L196-L200 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L195-L199 type FingerprintTokenFilter struct { MaxOutputSize *int `json:"max_output_size,omitempty"` Separator *string `json:"separator,omitempty"` @@ -118,3 +118,13 @@ func NewFingerprintTokenFilter() *FingerprintTokenFilter { return r } + +// true + +type FingerprintTokenFilterVariant interface { + FingerprintTokenFilterCaster() *FingerprintTokenFilter +} + +func (s *FingerprintTokenFilter) FingerprintTokenFilterCaster() *FingerprintTokenFilter { + return s +} diff --git a/typedapi/types/finnishanalyzer.go b/typedapi/types/finnishanalyzer.go new file mode 100644 index 0000000000..76896c5a2f --- /dev/null +++ b/typedapi/types/finnishanalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FinnishAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L145-L150 +type FinnishAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *FinnishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FinnishAnalyzer) MarshalJSON() ([]byte, 
error) { + type innerFinnishAnalyzer FinnishAnalyzer + tmp := innerFinnishAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "finnish" + + return json.Marshal(tmp) +} + +// NewFinnishAnalyzer returns a FinnishAnalyzer. +func NewFinnishAnalyzer() *FinnishAnalyzer { + r := &FinnishAnalyzer{} + + return r +} + +// true + +type FinnishAnalyzerVariant interface { + FinnishAnalyzerCaster() *FinnishAnalyzer +} + +func (s *FinnishAnalyzer) FinnishAnalyzerCaster() *FinnishAnalyzer { + return s +} diff --git a/typedapi/types/flattened.go b/typedapi/types/flattened.go index a2fd911f7c..a2c1922e8d 100644 --- a/typedapi/types/flattened.go +++ b/typedapi/types/flattened.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Flattened type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L356-L358 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L366-L368 type Flattened struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -108,3 +108,5 @@ func NewFlattened() *Flattened { return r } + +// false diff --git a/typedapi/types/flattenedproperty.go b/typedapi/types/flattenedproperty.go index 9d9fe6384e..b50a3906a0 100644 --- a/typedapi/types/flattenedproperty.go +++ b/typedapi/types/flattenedproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,11 +30,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // FlattenedProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/complex.ts#L26-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/complex.ts#L26-L37 type FlattenedProperty struct { Boost *Float64 `json:"boost,omitempty"` DepthLimit *int `json:"depth_limit,omitempty"` @@ -46,12 +47,13 @@ type FlattenedProperty struct { Index *bool `json:"index,omitempty"` IndexOptions *indexoptions.IndexOptions `json:"index_options,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - NullValue *string `json:"null_value,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - SplitQueriesOnWhitespace *bool `json:"split_queries_on_whitespace,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *string `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Similarity *string `json:"similarity,omitempty"` + SplitQueriesOnWhitespace *bool `json:"split_queries_on_whitespace,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { @@ -153,301 +155,313 @@ func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := 
NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := 
NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -527,301 +541,313 @@ func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -853,6 +879,11 @@ func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { s.SplitQueriesOnWhitespace = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -881,6 +912,7 @@ func (s FlattenedProperty) MarshalJSON() ([]byte, error) { Properties: s.Properties, Similarity: s.Similarity, SplitQueriesOnWhitespace: s.SplitQueriesOnWhitespace, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, } @@ -892,10 +924,20 @@ func (s FlattenedProperty) MarshalJSON() ([]byte, error) { // NewFlattenedProperty returns a FlattenedProperty. 
func NewFlattenedProperty() *FlattenedProperty { r := &FlattenedProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type FlattenedPropertyVariant interface { + FlattenedPropertyCaster() *FlattenedProperty +} + +func (s *FlattenedProperty) FlattenedPropertyCaster() *FlattenedProperty { + return s +} diff --git a/typedapi/types/floatnumberproperty.go b/typedapi/types/floatnumberproperty.go index a17c6c09ce..eab5de3b90 100644 --- a/typedapi/types/floatnumberproperty.go +++ b/typedapi/types/floatnumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // FloatNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L142-L145 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L146-L149 type FloatNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,13 +48,13 @@ type FloatNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *float32 `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *float32 `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -162,301 +163,313 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -554,301 +567,313 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -859,18 +884,6 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Script", err) } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -885,6 +898,11 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -932,8 +950,8 @@ func (s FloatNumberProperty) MarshalJSON() ([]byte, error) { OnScriptError: s.OnScriptError, Properties: s.Properties, Script: s.Script, - Similarity: s.Similarity, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -947,10 +965,20 @@ func (s FloatNumberProperty) MarshalJSON() ([]byte, error) { // NewFloatNumberProperty returns a FloatNumberProperty. 
func NewFloatNumberProperty() *FloatNumberProperty { r := &FloatNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type FloatNumberPropertyVariant interface { + FloatNumberPropertyCaster() *FloatNumberProperty +} + +func (s *FloatNumberProperty) FloatNumberPropertyCaster() *FloatNumberProperty { + return s +} diff --git a/typedapi/types/floatrangeproperty.go b/typedapi/types/floatrangeproperty.go index efa5e6d575..3a1763374f 100644 --- a/typedapi/types/floatrangeproperty.go +++ b/typedapi/types/floatrangeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // FloatRangeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/range.ts#L38-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/range.ts#L38-L40 type FloatRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -44,11 +45,11 @@ type FloatRangeProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { @@ -150,301 +151,313 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -507,318 +520,318 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = 
oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { 
+ return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -833,6 +846,11 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -847,19 +865,19 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { func (s FloatRangeProperty) MarshalJSON() ([]byte, error) { type innerFloatRangeProperty FloatRangeProperty tmp := innerFloatRangeProperty{ - Boost: s.Boost, - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + 
Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "float_range" @@ -870,10 +888,20 @@ func (s FloatRangeProperty) MarshalJSON() ([]byte, error) { // NewFloatRangeProperty returns a FloatRangeProperty. func NewFloatRangeProperty() *FloatRangeProperty { r := &FloatRangeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type FloatRangePropertyVariant interface { + FloatRangePropertyCaster() *FloatRangeProperty +} + +func (s *FloatRangeProperty) FloatRangePropertyCaster() *FloatRangeProperty { + return s +} diff --git a/typedapi/types/flushstats.go b/typedapi/types/flushstats.go index 3923c75d38..81830dede6 100644 --- a/typedapi/types/flushstats.go +++ b/typedapi/types/flushstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FlushStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L123-L128 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L126-L131 type FlushStats struct { Periodic int64 `json:"periodic"` Total int64 `json:"total"` @@ -105,3 +105,5 @@ func NewFlushStats() *FlushStats { return r } + +// false diff --git a/typedapi/types/followerindex.go b/typedapi/types/followerindex.go index c14110213f..32eab82fd0 100644 --- a/typedapi/types/followerindex.go +++ b/typedapi/types/followerindex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,13 +32,19 @@ import ( // FollowerIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/follow_info/types.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/follow_info/types.ts#L24-L35 type FollowerIndex struct { - FollowerIndex string `json:"follower_index"` - LeaderIndex string `json:"leader_index"` - Parameters *FollowerIndexParameters `json:"parameters,omitempty"` - RemoteCluster string `json:"remote_cluster"` - Status followerindexstatus.FollowerIndexStatus `json:"status"` + // FollowerIndex The name of the follower index. + FollowerIndex string `json:"follower_index"` + // LeaderIndex The name of the index in the leader cluster that is followed. + LeaderIndex string `json:"leader_index"` + // Parameters An object that encapsulates cross-cluster replication parameters. 
If the + // follower index's status is paused, this object is omitted. + Parameters *FollowerIndexParameters `json:"parameters,omitempty"` + // RemoteCluster The remote cluster that contains the leader index. + RemoteCluster string `json:"remote_cluster"` + // Status The status of the index following: `active` or `paused`. + Status followerindexstatus.FollowerIndexStatus `json:"status"` } func (s *FollowerIndex) UnmarshalJSON(data []byte) error { @@ -92,3 +98,5 @@ func NewFollowerIndex() *FollowerIndex { return r } + +// false diff --git a/typedapi/types/followerindexparameters.go b/typedapi/types/followerindexparameters.go index ed29541cbd..e0a920e7b7 100644 --- a/typedapi/types/followerindexparameters.go +++ b/typedapi/types/followerindexparameters.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,18 +31,41 @@ import ( // FollowerIndexParameters type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/follow_info/types.ts#L38-L49 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/follow_info/types.ts#L42-L88 type FollowerIndexParameters struct { - MaxOutstandingReadRequests int `json:"max_outstanding_read_requests"` - MaxOutstandingWriteRequests int `json:"max_outstanding_write_requests"` - MaxReadRequestOperationCount int `json:"max_read_request_operation_count"` - MaxReadRequestSize string `json:"max_read_request_size"` - MaxRetryDelay Duration `json:"max_retry_delay"` - MaxWriteBufferCount int `json:"max_write_buffer_count"` - MaxWriteBufferSize string `json:"max_write_buffer_size"` - MaxWriteRequestOperationCount int `json:"max_write_request_operation_count"` - MaxWriteRequestSize string `json:"max_write_request_size"` - ReadPollTimeout Duration `json:"read_poll_timeout"` + // MaxOutstandingReadRequests The maximum number of outstanding reads requests from the remote cluster. + MaxOutstandingReadRequests *int64 `json:"max_outstanding_read_requests,omitempty"` + // MaxOutstandingWriteRequests The maximum number of outstanding write requests on the follower. + MaxOutstandingWriteRequests *int `json:"max_outstanding_write_requests,omitempty"` + // MaxReadRequestOperationCount The maximum number of operations to pull per read from the remote cluster. + MaxReadRequestOperationCount *int `json:"max_read_request_operation_count,omitempty"` + // MaxReadRequestSize The maximum size in bytes of per read of a batch of operations pulled from + // the remote cluster. + MaxReadRequestSize ByteSize `json:"max_read_request_size,omitempty"` + // MaxRetryDelay The maximum time to wait before retrying an operation that failed + // exceptionally. An exponential backoff strategy is employed when + // retrying. 
+ MaxRetryDelay Duration `json:"max_retry_delay,omitempty"` + // MaxWriteBufferCount The maximum number of operations that can be queued for writing. When this + // limit is reached, reads from the remote cluster will be + // deferred until the number of queued operations goes below the limit. + MaxWriteBufferCount *int `json:"max_write_buffer_count,omitempty"` + // MaxWriteBufferSize The maximum total bytes of operations that can be queued for writing. When + // this limit is reached, reads from the remote cluster will + // be deferred until the total bytes of queued operations goes below the limit. + MaxWriteBufferSize ByteSize `json:"max_write_buffer_size,omitempty"` + // MaxWriteRequestOperationCount The maximum number of operations per bulk write request executed on the + // follower. + MaxWriteRequestOperationCount *int `json:"max_write_request_operation_count,omitempty"` + // MaxWriteRequestSize The maximum total bytes of operations per bulk write request executed on the + // follower. + MaxWriteRequestSize ByteSize `json:"max_write_request_size,omitempty"` + // ReadPollTimeout The maximum time to wait for new operations on the remote cluster when the + // follower index is synchronized with the leader index. + // When the timeout has elapsed, the poll for operations will return to the + // follower so that it can update some statistics. + // Then the follower will immediately attempt to read from the leader again. 
+ ReadPollTimeout Duration `json:"read_poll_timeout,omitempty"` } func (s *FollowerIndexParameters) UnmarshalJSON(data []byte) error { @@ -61,19 +84,18 @@ func (s *FollowerIndexParameters) UnmarshalJSON(data []byte) error { switch t { case "max_outstanding_read_requests": - var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: - value, err := strconv.Atoi(v) + value, err := strconv.ParseInt(v, 10, 64) if err != nil { return fmt.Errorf("%s | %w", "MaxOutstandingReadRequests", err) } - s.MaxOutstandingReadRequests = value + s.MaxOutstandingReadRequests = &value case float64: - f := int(v) - s.MaxOutstandingReadRequests = f + f := int64(v) + s.MaxOutstandingReadRequests = &f } case "max_outstanding_write_requests": @@ -86,10 +108,10 @@ func (s *FollowerIndexParameters) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "MaxOutstandingWriteRequests", err) } - s.MaxOutstandingWriteRequests = value + s.MaxOutstandingWriteRequests = &value case float64: f := int(v) - s.MaxOutstandingWriteRequests = f + s.MaxOutstandingWriteRequests = &f } case "max_read_request_operation_count": @@ -102,23 +124,16 @@ func (s *FollowerIndexParameters) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "MaxReadRequestOperationCount", err) } - s.MaxReadRequestOperationCount = value + s.MaxReadRequestOperationCount = &value case float64: f := int(v) - s.MaxReadRequestOperationCount = f + s.MaxReadRequestOperationCount = &f } case "max_read_request_size": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.MaxReadRequestSize); err != nil { return fmt.Errorf("%s | %w", "MaxReadRequestSize", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.MaxReadRequestSize = o case "max_retry_delay": if err := dec.Decode(&s.MaxRetryDelay); err != nil { @@ -135,23 +150,16 @@ func (s *FollowerIndexParameters) UnmarshalJSON(data []byte) error { 
if err != nil { return fmt.Errorf("%s | %w", "MaxWriteBufferCount", err) } - s.MaxWriteBufferCount = value + s.MaxWriteBufferCount = &value case float64: f := int(v) - s.MaxWriteBufferCount = f + s.MaxWriteBufferCount = &f } case "max_write_buffer_size": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.MaxWriteBufferSize); err != nil { return fmt.Errorf("%s | %w", "MaxWriteBufferSize", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.MaxWriteBufferSize = o case "max_write_request_operation_count": @@ -163,23 +171,16 @@ func (s *FollowerIndexParameters) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "MaxWriteRequestOperationCount", err) } - s.MaxWriteRequestOperationCount = value + s.MaxWriteRequestOperationCount = &value case float64: f := int(v) - s.MaxWriteRequestOperationCount = f + s.MaxWriteRequestOperationCount = &f } case "max_write_request_size": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.MaxWriteRequestSize); err != nil { return fmt.Errorf("%s | %w", "MaxWriteRequestSize", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.MaxWriteRequestSize = o case "read_poll_timeout": if err := dec.Decode(&s.ReadPollTimeout); err != nil { @@ -197,3 +198,5 @@ func NewFollowerIndexParameters() *FollowerIndexParameters { return r } + +// false diff --git a/typedapi/types/followindexstats.go b/typedapi/types/followindexstats.go index 8cc8daee5d..18b7c49dfe 100644 --- a/typedapi/types/followindexstats.go +++ b/typedapi/types/followindexstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,9 +30,11 @@ import ( // FollowIndexStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/_types/FollowIndexStats.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/_types/FollowIndexStats.ts#L30-L35 type FollowIndexStats struct { - Index string `json:"index"` + // Index The name of the follower index. + Index string `json:"index"` + // Shards An array of shard-level following task statistics. Shards []CcrShardStats `json:"shards"` } @@ -72,3 +74,5 @@ func NewFollowIndexStats() *FollowIndexStats { return r } + +// false diff --git a/typedapi/types/followstats.go b/typedapi/types/followstats.go index b70607f36e..ab17025733 100644 --- a/typedapi/types/followstats.go +++ b/typedapi/types/followstats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // FollowStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/stats/types.ts.ts#L41-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/stats/types.ts.ts#L49-L51 type FollowStats struct { Indices []FollowIndexStats `json:"indices"` } @@ -33,3 +33,5 @@ func NewFollowStats() *FollowStats { return r } + +// false diff --git a/typedapi/types/forcemergeaction.go b/typedapi/types/forcemergeaction.go index 1277ddf081..bf3672dd37 100644 --- a/typedapi/types/forcemergeaction.go +++ b/typedapi/types/forcemergeaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ForceMergeAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Phase.ts#L126-L129 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Phase.ts#L123-L126 type ForceMergeAction struct { IndexCodec *string `json:"index_codec,omitempty"` MaxNumSegments int `json:"max_num_segments"` @@ -91,3 +91,13 @@ func NewForceMergeAction() *ForceMergeAction { return r } + +// true + +type ForceMergeActionVariant interface { + ForceMergeActionCaster() *ForceMergeAction +} + +func (s *ForceMergeAction) ForceMergeActionCaster() *ForceMergeAction { + return s +} diff --git a/typedapi/types/foreachprocessor.go b/typedapi/types/foreachprocessor.go index 77c5ea1ff8..e61027808f 100644 --- a/typedapi/types/foreachprocessor.go +++ b/typedapi/types/foreachprocessor.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ForeachProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L656-L670 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L934-L948 type ForeachProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -48,7 +48,7 @@ type ForeachProcessor struct { // OnFailure Handle failures for the processor. OnFailure []ProcessorContainer `json:"on_failure,omitempty"` // Processor Ingest processor to run on each element. - Processor *ProcessorContainer `json:"processor,omitempty"` + Processor ProcessorContainer `json:"processor"` // Tag Identifier for the processor. // Useful for debugging and metrics. Tag *string `json:"tag,omitempty"` @@ -159,3 +159,13 @@ func NewForeachProcessor() *ForeachProcessor { return r } + +// true + +type ForeachProcessorVariant interface { + ForeachProcessorCaster() *ForeachProcessor +} + +func (s *ForeachProcessor) ForeachProcessorCaster() *ForeachProcessor { + return s +} diff --git a/typedapi/types/formattablemetricaggregation.go b/typedapi/types/formattablemetricaggregation.go deleted file mode 100644 index 9201728f04..0000000000 --- a/typedapi/types/formattablemetricaggregation.go +++ /dev/null @@ -1,97 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// FormattableMetricAggregation type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L51-L53 -type FormattableMetricAggregation struct { - // Field The field on which to run the aggregation. - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` - // Missing The value to apply to documents that do not have a value. - // By default, documents without a value are ignored. 
- Missing Missing `json:"missing,omitempty"` - Script *Script `json:"script,omitempty"` -} - -func (s *FormattableMetricAggregation) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "field": - if err := dec.Decode(&s.Field); err != nil { - return fmt.Errorf("%s | %w", "Field", err) - } - - case "format": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Format", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Format = &o - - case "missing": - if err := dec.Decode(&s.Missing); err != nil { - return fmt.Errorf("%s | %w", "Missing", err) - } - - case "script": - if err := dec.Decode(&s.Script); err != nil { - return fmt.Errorf("%s | %w", "Script", err) - } - - } - } - return nil -} - -// NewFormattableMetricAggregation returns a FormattableMetricAggregation. -func NewFormattableMetricAggregation() *FormattableMetricAggregation { - r := &FormattableMetricAggregation{} - - return r -} diff --git a/typedapi/types/foundstatus.go b/typedapi/types/foundstatus.go index c46d812c0f..f0fbfd60cc 100644 --- a/typedapi/types/foundstatus.go +++ b/typedapi/types/foundstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FoundStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/delete_privileges/types.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/delete_privileges/types.ts#L20-L22 type FoundStatus struct { Found bool `json:"found"` } @@ -76,3 +76,5 @@ func NewFoundStatus() *FoundStatus { return r } + +// false diff --git a/typedapi/types/frenchanalyzer.go b/typedapi/types/frenchanalyzer.go new file mode 100644 index 0000000000..86a8a1111d --- /dev/null +++ b/typedapi/types/frenchanalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FrenchAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L152-L157 +type FrenchAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *FrenchAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FrenchAnalyzer) MarshalJSON() ([]byte, error) { + type innerFrenchAnalyzer FrenchAnalyzer + tmp := innerFrenchAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "french" + + return 
json.Marshal(tmp) +} + +// NewFrenchAnalyzer returns a FrenchAnalyzer. +func NewFrenchAnalyzer() *FrenchAnalyzer { + r := &FrenchAnalyzer{} + + return r +} + +// true + +type FrenchAnalyzerVariant interface { + FrenchAnalyzerCaster() *FrenchAnalyzer +} + +func (s *FrenchAnalyzer) FrenchAnalyzerCaster() *FrenchAnalyzer { + return s +} diff --git a/typedapi/types/frequencyencodingpreprocessor.go b/typedapi/types/frequencyencodingpreprocessor.go index 605a65bd66..53b63c3bf0 100644 --- a/typedapi/types/frequencyencodingpreprocessor.go +++ b/typedapi/types/frequencyencodingpreprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FrequencyEncodingPreprocessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/types.ts#L38-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/types.ts#L38-L42 type FrequencyEncodingPreprocessor struct { FeatureName string `json:"feature_name"` Field string `json:"field"` @@ -93,8 +93,18 @@ func (s *FrequencyEncodingPreprocessor) UnmarshalJSON(data []byte) error { // NewFrequencyEncodingPreprocessor returns a FrequencyEncodingPreprocessor. 
func NewFrequencyEncodingPreprocessor() *FrequencyEncodingPreprocessor { r := &FrequencyEncodingPreprocessor{ - FrequencyMap: make(map[string]Float64, 0), + FrequencyMap: make(map[string]Float64), } return r } + +// true + +type FrequencyEncodingPreprocessorVariant interface { + FrequencyEncodingPreprocessorCaster() *FrequencyEncodingPreprocessor +} + +func (s *FrequencyEncodingPreprocessor) FrequencyEncodingPreprocessorCaster() *FrequencyEncodingPreprocessor { + return s +} diff --git a/typedapi/types/frequentitemsetsaggregate.go b/typedapi/types/frequentitemsetsaggregate.go index 58e52d4618..40ee4c4f5b 100644 --- a/typedapi/types/frequentitemsetsaggregate.go +++ b/typedapi/types/frequentitemsetsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // FrequentItemSetsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L643-L644 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L722-L723 type FrequentItemSetsAggregate struct { Buckets BucketsFrequentItemSetsBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewFrequentItemSetsAggregate() *FrequentItemSetsAggregate { return r } + +// false diff --git a/typedapi/types/frequentitemsetsaggregation.go b/typedapi/types/frequentitemsetsaggregation.go index 1c28fce790..9461d35702 100644 --- a/typedapi/types/frequentitemsetsaggregation.go +++ b/typedapi/types/frequentitemsetsaggregation.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FrequentItemSetsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L1167-L1191 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1240-L1267 type FrequentItemSetsAggregation struct { // Fields Fields to analyze. Fields []FrequentItemSetsField `json:"fields"` @@ -129,3 +129,13 @@ func NewFrequentItemSetsAggregation() *FrequentItemSetsAggregation { return r } + +// true + +type FrequentItemSetsAggregationVariant interface { + FrequentItemSetsAggregationCaster() *FrequentItemSetsAggregation +} + +func (s *FrequentItemSetsAggregation) FrequentItemSetsAggregationCaster() *FrequentItemSetsAggregation { + return s +} diff --git a/typedapi/types/frequentitemsetsbucket.go b/typedapi/types/frequentitemsetsbucket.go index fb0fe76f0c..1b5c49307f 100644 --- a/typedapi/types/frequentitemsetsbucket.go +++ b/typedapi/types/frequentitemsetsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // FrequentItemSetsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L646-L649 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L725-L728 type FrequentItemSetsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -511,6 +511,13 @@ func (s *FrequentItemSetsBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -644,9 +651,11 @@ func (s FrequentItemSetsBucket) MarshalJSON() ([]byte, error) { // NewFrequentItemSetsBucket returns a FrequentItemSetsBucket. func NewFrequentItemSetsBucket() *FrequentItemSetsBucket { r := &FrequentItemSetsBucket{ - Aggregations: make(map[string]Aggregate, 0), - Key: make(map[string][]string, 0), + Aggregations: make(map[string]Aggregate), + Key: make(map[string][]string), } return r } + +// false diff --git a/typedapi/types/frequentitemsetsfield.go b/typedapi/types/frequentitemsetsfield.go index d40f47e3ab..4efb130f81 100644 --- a/typedapi/types/frequentitemsetsfield.go +++ b/typedapi/types/frequentitemsetsfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // FrequentItemSetsField type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L1153-L1165 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1226-L1238 type FrequentItemSetsField struct { // Exclude Values to exclude. // Can be regular expression strings or arrays of strings of exact terms. @@ -124,3 +124,13 @@ func NewFrequentItemSetsField() *FrequentItemSetsField { return r } + +// true + +type FrequentItemSetsFieldVariant interface { + FrequentItemSetsFieldCaster() *FrequentItemSetsField +} + +func (s *FrequentItemSetsField) FrequentItemSetsFieldCaster() *FrequentItemSetsField { + return s +} diff --git a/typedapi/types/frozenindices.go b/typedapi/types/frozenindices.go index 675d82d0b2..e3f26d3d99 100644 --- a/typedapi/types/frozenindices.go +++ b/typedapi/types/frozenindices.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FrozenIndices type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L360-L362 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L370-L372 type FrozenIndices struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -107,3 +107,5 @@ func NewFrozenIndices() *FrozenIndices { return r } + +// false diff --git a/typedapi/types/functionscore.go b/typedapi/types/functionscore.go index 9a252ed4fa..019c9e276d 100644 --- a/typedapi/types/functionscore.go +++ b/typedapi/types/functionscore.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,8 +31,9 @@ import ( // FunctionScore type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L213-L253 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L226-L266 type FunctionScore struct { + AdditionalFunctionScoreProperty map[string]json.RawMessage `json:"-"` // Exp Function that scores a document with a exponential decay, depending on the // distance of a numeric field value of the document from an origin. 
Exp DecayFunction `json:"exp,omitempty"` @@ -152,14 +153,68 @@ func (s *FunctionScore) UnmarshalJSON(data []byte) error { s.Weight = &f } + default: + + if key, ok := t.(string); ok { + if s.AdditionalFunctionScoreProperty == nil { + s.AdditionalFunctionScoreProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalFunctionScoreProperty", err) + } + s.AdditionalFunctionScoreProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s FunctionScore) MarshalJSON() ([]byte, error) { + type opt FunctionScore + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalFunctionScoreProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalFunctionScoreProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewFunctionScore returns a FunctionScore. func NewFunctionScore() *FunctionScore { - r := &FunctionScore{} + r := &FunctionScore{ + AdditionalFunctionScoreProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type FunctionScoreVariant interface { + FunctionScoreCaster() *FunctionScore +} + +func (s *FunctionScore) FunctionScoreCaster() *FunctionScore { + return s +} diff --git a/typedapi/types/functionscorequery.go b/typedapi/types/functionscorequery.go index 76526325d6..d3888efe25 100644 --- a/typedapi/types/functionscorequery.go +++ b/typedapi/types/functionscorequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // FunctionScoreQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L93-L122 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L105-L135 type FunctionScoreQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -169,3 +169,13 @@ func NewFunctionScoreQuery() *FunctionScoreQuery { return r } + +// true + +type FunctionScoreQueryVariant interface { + FunctionScoreQueryCaster() *FunctionScoreQuery +} + +func (s *FunctionScoreQuery) FunctionScoreQueryCaster() *FunctionScoreQuery { + return s +} diff --git a/typedapi/types/fuzziness.go b/typedapi/types/fuzziness.go index 65d89ac7f2..95ead6281d 100644 --- a/typedapi/types/fuzziness.go +++ b/typedapi/types/fuzziness.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // int // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L133-L134 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L137-L138 type Fuzziness any + +type FuzzinessVariant interface { + FuzzinessCaster() *Fuzziness +} diff --git a/typedapi/types/fuzzyquery.go b/typedapi/types/fuzzyquery.go index 5d10376d59..5571aa1411 100644 --- a/typedapi/types/fuzzyquery.go +++ b/typedapi/types/fuzzyquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // FuzzyQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L44-L79 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L48-L86 type FuzzyQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -189,3 +189,13 @@ func NewFuzzyQuery() *FuzzyQuery { return r } + +// true + +type FuzzyQueryVariant interface { + FuzzyQueryCaster() *FuzzyQuery +} + +func (s *FuzzyQuery) FuzzyQueryCaster() *FuzzyQuery { + return s +} diff --git a/typedapi/types/galiciananalyzer.go b/typedapi/types/galiciananalyzer.go new file mode 100644 index 0000000000..c6afd487a3 --- /dev/null +++ b/typedapi/types/galiciananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GalicianAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L159-L164 +type GalicianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *GalicianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s GalicianAnalyzer) MarshalJSON() ([]byte, error) { + type innerGalicianAnalyzer GalicianAnalyzer + tmp := innerGalicianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "galician" + 
+ return json.Marshal(tmp) +} + +// NewGalicianAnalyzer returns a GalicianAnalyzer. +func NewGalicianAnalyzer() *GalicianAnalyzer { + r := &GalicianAnalyzer{} + + return r +} + +// true + +type GalicianAnalyzerVariant interface { + GalicianAnalyzerCaster() *GalicianAnalyzer +} + +func (s *GalicianAnalyzer) GalicianAnalyzerCaster() *GalicianAnalyzer { + return s +} diff --git a/typedapi/types/garbagecollector.go b/typedapi/types/garbagecollector.go index f93f88d797..abe59a6844 100644 --- a/typedapi/types/garbagecollector.go +++ b/typedapi/types/garbagecollector.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // GarbageCollector type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L923-L928 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L994-L999 type GarbageCollector struct { // Collectors Contains statistics about JVM garbage collectors for the node. Collectors map[string]GarbageCollectorTotal `json:"collectors,omitempty"` @@ -31,8 +31,10 @@ type GarbageCollector struct { // NewGarbageCollector returns a GarbageCollector. func NewGarbageCollector() *GarbageCollector { r := &GarbageCollector{ - Collectors: make(map[string]GarbageCollectorTotal, 0), + Collectors: make(map[string]GarbageCollectorTotal), } return r } + +// false diff --git a/typedapi/types/garbagecollectortotal.go b/typedapi/types/garbagecollectortotal.go index a41b77d995..4c78693ff7 100644 --- a/typedapi/types/garbagecollectortotal.go +++ b/typedapi/types/garbagecollectortotal.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GarbageCollectorTotal type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L930-L943 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L1001-L1014 type GarbageCollectorTotal struct { // CollectionCount Total number of JVM garbage collectors that collect objects. CollectionCount *int64 `json:"collection_count,omitempty"` @@ -109,3 +109,5 @@ func NewGarbageCollectorTotal() *GarbageCollectorTotal { return r } + +// false diff --git a/typedapi/types/gcsrepository.go b/typedapi/types/gcsrepository.go index 768911e087..594a4343a4 100644 --- a/typedapi/types/gcsrepository.go +++ b/typedapi/types/gcsrepository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // GcsRepository type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotRepository.ts#L45-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotRepository.ts#L45-L48 type GcsRepository struct { Settings GcsRepositorySettings `json:"settings"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewGcsRepository() *GcsRepository { return r } + +// true + +type GcsRepositoryVariant interface { + GcsRepositoryCaster() *GcsRepository +} + +func (s *GcsRepository) GcsRepositoryCaster() *GcsRepository { + return s +} diff --git a/typedapi/types/gcsrepositorysettings.go b/typedapi/types/gcsrepositorysettings.go index a276c3b2b5..94e5b3feb3 100644 --- a/typedapi/types/gcsrepositorysettings.go +++ b/typedapi/types/gcsrepositorysettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GcsRepositorySettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotRepository.ts#L85-L91 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotRepository.ts#L85-L91 type GcsRepositorySettings struct { ApplicationName *string `json:"application_name,omitempty"` BasePath *string `json:"base_path,omitempty"` @@ -161,3 +161,13 @@ func NewGcsRepositorySettings() *GcsRepositorySettings { return r } + +// true + +type GcsRepositorySettingsVariant interface { + GcsRepositorySettingsCaster() *GcsRepositorySettings +} + +func (s *GcsRepositorySettings) GcsRepositorySettingsCaster() *GcsRepositorySettings { + return s +} diff --git a/typedapi/types/geoboundingboxquery.go b/typedapi/types/geoboundingboxquery.go index 134f326b31..d990f2cf76 100644 --- a/typedapi/types/geoboundingboxquery.go +++ b/typedapi/types/geoboundingboxquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // GeoBoundingBoxQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/geo.ts#L32-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/geo.ts#L35-L57 type GeoBoundingBoxQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -172,8 +172,18 @@ func (s GeoBoundingBoxQuery) MarshalJSON() ([]byte, error) { // NewGeoBoundingBoxQuery returns a GeoBoundingBoxQuery. 
func NewGeoBoundingBoxQuery() *GeoBoundingBoxQuery { r := &GeoBoundingBoxQuery{ - GeoBoundingBoxQuery: make(map[string]GeoBounds, 0), + GeoBoundingBoxQuery: make(map[string]GeoBounds), } return r } + +// true + +type GeoBoundingBoxQueryVariant interface { + GeoBoundingBoxQueryCaster() *GeoBoundingBoxQuery +} + +func (s *GeoBoundingBoxQuery) GeoBoundingBoxQueryCaster() *GeoBoundingBoxQuery { + return s +} diff --git a/typedapi/types/geobounds.go b/typedapi/types/geobounds.go index 0c89235074..199bd698e5 100644 --- a/typedapi/types/geobounds.go +++ b/typedapi/types/geobounds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -27,5 +27,9 @@ package types // TopRightBottomLeftGeoBounds // WktGeoBounds // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Geo.ts#L135-L148 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Geo.ts#L135-L148 type GeoBounds any + +type GeoBoundsVariant interface { + GeoBoundsCaster() *GeoBounds +} diff --git a/typedapi/types/geoboundsaggregate.go b/typedapi/types/geoboundsaggregate.go index e0599d8bf4..4240967601 100644 --- a/typedapi/types/geoboundsaggregate.go +++ b/typedapi/types/geoboundsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // GeoBoundsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L303-L306 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L327-L333 type GeoBoundsAggregate struct { Bounds GeoBounds `json:"bounds,omitempty"` Meta Metadata `json:"meta,omitempty"` @@ -130,3 +130,5 @@ func NewGeoBoundsAggregate() *GeoBoundsAggregate { return r } + +// false diff --git a/typedapi/types/geoboundsaggregation.go b/typedapi/types/geoboundsaggregation.go index da05b3b2e3..2164b32300 100644 --- a/typedapi/types/geoboundsaggregation.go +++ b/typedapi/types/geoboundsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GeoBoundsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L108-L114 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L108-L117 type GeoBoundsAggregation struct { // Field The field on which to run the aggregation. 
Field *string `json:"field,omitempty"` @@ -99,3 +99,13 @@ func NewGeoBoundsAggregation() *GeoBoundsAggregation { return r } + +// true + +type GeoBoundsAggregationVariant interface { + GeoBoundsAggregationCaster() *GeoBoundsAggregation +} + +func (s *GeoBoundsAggregation) GeoBoundsAggregationCaster() *GeoBoundsAggregation { + return s +} diff --git a/typedapi/types/geocentroidaggregate.go b/typedapi/types/geocentroidaggregate.go index e9898180a6..6825348eae 100644 --- a/typedapi/types/geocentroidaggregate.go +++ b/typedapi/types/geocentroidaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GeoCentroidAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L308-L312 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L335-L342 type GeoCentroidAggregate struct { Count int64 `json:"count"` Location GeoLocation `json:"location,omitempty"` @@ -129,3 +129,5 @@ func NewGeoCentroidAggregate() *GeoCentroidAggregate { return r } + +// false diff --git a/typedapi/types/geocentroidaggregation.go b/typedapi/types/geocentroidaggregation.go index 5fda07b133..f17cc0600f 100644 --- a/typedapi/types/geocentroidaggregation.go +++ b/typedapi/types/geocentroidaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GeoCentroidAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L116-L119 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L119-L122 type GeoCentroidAggregation struct { Count *int64 `json:"count,omitempty"` // Field The field on which to run the aggregation. @@ -144,3 +144,13 @@ func NewGeoCentroidAggregation() *GeoCentroidAggregation { return r } + +// true + +type GeoCentroidAggregationVariant interface { + GeoCentroidAggregationCaster() *GeoCentroidAggregation +} + +func (s *GeoCentroidAggregation) GeoCentroidAggregationCaster() *GeoCentroidAggregation { + return s +} diff --git a/typedapi/types/geodecayfunction.go b/typedapi/types/geodecayfunction.go index 7032078565..53fa40b9c8 100644 --- a/typedapi/types/geodecayfunction.go +++ b/typedapi/types/geodecayfunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,7 +29,7 @@ import ( // GeoDecayFunction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L197-L200 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L210-L213 type GeoDecayFunction struct { DecayFunctionBaseGeoLocationDistance map[string]DecayPlacementGeoLocationDistance `json:"-"` // MultiValueMode Determines how the distance is calculated when a field used for computing the @@ -69,8 +69,18 @@ func (s GeoDecayFunction) MarshalJSON() ([]byte, error) { // NewGeoDecayFunction returns a GeoDecayFunction. func NewGeoDecayFunction() *GeoDecayFunction { r := &GeoDecayFunction{ - DecayFunctionBaseGeoLocationDistance: make(map[string]DecayPlacementGeoLocationDistance, 0), + DecayFunctionBaseGeoLocationDistance: make(map[string]DecayPlacementGeoLocationDistance), } return r } + +// true + +type GeoDecayFunctionVariant interface { + GeoDecayFunctionCaster() *GeoDecayFunction +} + +func (s *GeoDecayFunction) GeoDecayFunctionCaster() *GeoDecayFunction { + return s +} diff --git a/typedapi/types/geodistanceaggregate.go b/typedapi/types/geodistanceaggregate.go index e2f2f0fd69..42120ce5e4 100644 --- a/typedapi/types/geodistanceaggregate.go +++ b/typedapi/types/geodistanceaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // GeoDistanceAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L554-L558 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L617-L622 type GeoDistanceAggregate struct { Buckets BucketsRangeBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewGeoDistanceAggregate() *GeoDistanceAggregate { return r } + +// false diff --git a/typedapi/types/geodistanceaggregation.go b/typedapi/types/geodistanceaggregation.go index 4615ff3402..f65f6d44e1 100644 --- a/typedapi/types/geodistanceaggregation.go +++ b/typedapi/types/geodistanceaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // GeoDistanceAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L382-L405 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L396-L419 type GeoDistanceAggregation struct { // DistanceType The distance calculation type. 
DistanceType *geodistancetype.GeoDistanceType `json:"distance_type,omitempty"` @@ -138,3 +138,13 @@ func NewGeoDistanceAggregation() *GeoDistanceAggregation { return r } + +// true + +type GeoDistanceAggregationVariant interface { + GeoDistanceAggregationCaster() *GeoDistanceAggregation +} + +func (s *GeoDistanceAggregation) GeoDistanceAggregationCaster() *GeoDistanceAggregation { + return s +} diff --git a/typedapi/types/geodistancefeaturequery.go b/typedapi/types/geodistancefeaturequery.go index 43621cf475..eb5572e3ac 100644 --- a/typedapi/types/geodistancefeaturequery.go +++ b/typedapi/types/geodistancefeaturequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GeoDistanceFeatureQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L67-L70 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L66-L69 type GeoDistanceFeatureQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -171,3 +171,13 @@ func NewGeoDistanceFeatureQuery() *GeoDistanceFeatureQuery { return r } + +// true + +type GeoDistanceFeatureQueryVariant interface { + GeoDistanceFeatureQueryCaster() *GeoDistanceFeatureQuery +} + +func (s *GeoDistanceFeatureQuery) GeoDistanceFeatureQueryCaster() *GeoDistanceFeatureQuery { + return s +} diff --git a/typedapi/types/geodistancequery.go b/typedapi/types/geodistancequery.go index 7ab6d5e316..e3eccd1b3e 100644 --- a/typedapi/types/geodistancequery.go +++ b/typedapi/types/geodistancequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // GeoDistanceQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/geo.ts#L60-L91 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/geo.ts#L64-L96 type GeoDistanceQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -183,8 +183,18 @@ func (s GeoDistanceQuery) MarshalJSON() ([]byte, error) { // NewGeoDistanceQuery returns a GeoDistanceQuery. 
func NewGeoDistanceQuery() *GeoDistanceQuery { r := &GeoDistanceQuery{ - GeoDistanceQuery: make(map[string]GeoLocation, 0), + GeoDistanceQuery: make(map[string]GeoLocation), } return r } + +// true + +type GeoDistanceQueryVariant interface { + GeoDistanceQueryCaster() *GeoDistanceQuery +} + +func (s *GeoDistanceQuery) GeoDistanceQueryCaster() *GeoDistanceQuery { + return s +} diff --git a/typedapi/types/geodistancesort.go b/typedapi/types/geodistancesort.go index 349cda2548..caa465f15c 100644 --- a/typedapi/types/geodistancesort.go +++ b/typedapi/types/geodistancesort.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -36,7 +36,7 @@ import ( // GeoDistanceSort type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/sort.ts#L59-L71 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/sort.ts#L58-L70 type GeoDistanceSort struct { DistanceType *geodistancetype.GeoDistanceType `json:"distance_type,omitempty"` GeoDistanceSort map[string][]GeoLocation `json:"-"` @@ -161,8 +161,18 @@ func (s GeoDistanceSort) MarshalJSON() ([]byte, error) { // NewGeoDistanceSort returns a GeoDistanceSort. 
func NewGeoDistanceSort() *GeoDistanceSort { r := &GeoDistanceSort{ - GeoDistanceSort: make(map[string][]GeoLocation, 0), + GeoDistanceSort: make(map[string][]GeoLocation), } return r } + +// true + +type GeoDistanceSortVariant interface { + GeoDistanceSortCaster() *GeoDistanceSort +} + +func (s *GeoDistanceSort) GeoDistanceSortCaster() *GeoDistanceSort { + return s +} diff --git a/typedapi/types/geogridprocessor.go b/typedapi/types/geogridprocessor.go new file mode 100644 index 0000000000..b81080d24e --- /dev/null +++ b/typedapi/types/geogridprocessor.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geogridtargetformat" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geogridtiletype" +) + +// GeoGridProcessor type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L388-L429 +type GeoGridProcessor struct { + // ChildrenField If specified and children tiles exist, save those tile addresses to this + // field as an array of strings. + ChildrenField *string `json:"children_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to interpret as a geo-tile.= + // The field format is determined by the `tile_type`. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // NonChildrenField If specified and intersecting non-child tiles exist, save their addresses to + // this field as an array of strings. + NonChildrenField *string `json:"non_children_field,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // ParentField If specified and a parent tile exists, save that tile address to this field. + ParentField *string `json:"parent_field,omitempty"` + // PrecisionField If specified, save the tile precision (zoom) as an integer to this field. + PrecisionField *string `json:"precision_field,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the polygon shape to, by default, the `field` is updated + // in-place. 
+ TargetField *string `json:"target_field,omitempty"` + // TargetFormat Which format to save the generated polygon in. + TargetFormat *geogridtargetformat.GeoGridTargetFormat `json:"target_format,omitempty"` + // TileType Three tile formats are understood: geohash, geotile and geohex. + TileType geogridtiletype.GeoGridTileType `json:"tile_type"` +} + +func (s *GeoGridProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "children_field": + if err := dec.Decode(&s.ChildrenField); err != nil { + return fmt.Errorf("%s | %w", "ChildrenField", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = o + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value 
+ case bool: + s.IgnoreMissing = &v + } + + case "non_children_field": + if err := dec.Decode(&s.NonChildrenField); err != nil { + return fmt.Errorf("%s | %w", "NonChildrenField", err) + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "parent_field": + if err := dec.Decode(&s.ParentField); err != nil { + return fmt.Errorf("%s | %w", "ParentField", err) + } + + case "precision_field": + if err := dec.Decode(&s.PrecisionField); err != nil { + return fmt.Errorf("%s | %w", "PrecisionField", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + case "target_format": + if err := dec.Decode(&s.TargetFormat); err != nil { + return fmt.Errorf("%s | %w", "TargetFormat", err) + } + + case "tile_type": + if err := dec.Decode(&s.TileType); err != nil { + return fmt.Errorf("%s | %w", "TileType", err) + } + + } + } + return nil +} + +// NewGeoGridProcessor returns a GeoGridProcessor. +func NewGeoGridProcessor() *GeoGridProcessor { + r := &GeoGridProcessor{} + + return r +} + +// true + +type GeoGridProcessorVariant interface { + GeoGridProcessorCaster() *GeoGridProcessor +} + +func (s *GeoGridProcessor) GeoGridProcessorCaster() *GeoGridProcessor { + return s +} diff --git a/typedapi/types/geogridquery.go b/typedapi/types/geogridquery.go new file mode 100644 index 0000000000..3024d19d64 --- /dev/null +++ b/typedapi/types/geogridquery.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoGridQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/geo.ts#L98-L103 +type GeoGridQuery struct { + AdditionalGeoGridQueryProperty map[string]json.RawMessage `json:"-"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
+ Boost *float32 `json:"boost,omitempty"` + Geogrid *string `json:"geogrid,omitempty"` + Geohash *string `json:"geohash,omitempty"` + Geohex *string `json:"geohex,omitempty"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *GeoGridQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "geogrid": + if err := dec.Decode(&s.Geogrid); err != nil { + return fmt.Errorf("%s | %w", "Geogrid", err) + } + + case "geohash": + if err := dec.Decode(&s.Geohash); err != nil { + return fmt.Errorf("%s | %w", "Geohash", err) + } + + case "geohex": + if err := dec.Decode(&s.Geohex); err != nil { + return fmt.Errorf("%s | %w", "Geohex", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + default: + + if key, ok := t.(string); ok { + if s.AdditionalGeoGridQueryProperty == nil { + s.AdditionalGeoGridQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalGeoGridQueryProperty", err) + } + s.AdditionalGeoGridQueryProperty[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s GeoGridQuery) MarshalJSON() ([]byte, error) { + type opt GeoGridQuery + // We transform the struct to a map without the embedded additional properties 
map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalGeoGridQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalGeoGridQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewGeoGridQuery returns a GeoGridQuery. +func NewGeoGridQuery() *GeoGridQuery { + r := &GeoGridQuery{ + AdditionalGeoGridQueryProperty: make(map[string]json.RawMessage), + } + + return r +} + +// true + +type GeoGridQueryVariant interface { + GeoGridQueryCaster() *GeoGridQuery +} + +func (s *GeoGridQuery) GeoGridQueryCaster() *GeoGridQuery { + return s +} diff --git a/typedapi/types/geohashgridaggregate.go b/typedapi/types/geohashgridaggregate.go index 0d59e6a702..26fe846eee 100644 --- a/typedapi/types/geohashgridaggregate.go +++ b/typedapi/types/geohashgridaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // GeoHashGridAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L510-L512 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L566-L568 type GeoHashGridAggregate struct { Buckets BucketsGeoHashGridBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewGeoHashGridAggregate() *GeoHashGridAggregate { return r } + +// false diff --git a/typedapi/types/geohashgridaggregation.go b/typedapi/types/geohashgridaggregation.go index b7e02cb9cc..635c01b983 100644 --- a/typedapi/types/geohashgridaggregation.go +++ b/typedapi/types/geohashgridaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GeoHashGridAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L407-L432 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L421-L449 type GeoHashGridAggregation struct { // Bounds The bounding box to filter the points in each bucket. 
Bounds GeoBounds `json:"bounds,omitempty"` @@ -181,3 +181,13 @@ func NewGeoHashGridAggregation() *GeoHashGridAggregation { return r } + +// true + +type GeoHashGridAggregationVariant interface { + GeoHashGridAggregationCaster() *GeoHashGridAggregation +} + +func (s *GeoHashGridAggregation) GeoHashGridAggregationCaster() *GeoHashGridAggregation { + return s +} diff --git a/typedapi/types/geohashgridbucket.go b/typedapi/types/geohashgridbucket.go index 3f4286df06..550497257a 100644 --- a/typedapi/types/geohashgridbucket.go +++ b/typedapi/types/geohashgridbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // GeoHashGridBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L514-L516 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L570-L572 type GeoHashGridBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -491,6 +491,13 @@ func (s *GeoHashGridBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -624,8 +631,10 @@ func (s GeoHashGridBucket) MarshalJSON() ([]byte, error) { // NewGeoHashGridBucket returns a GeoHashGridBucket. 
func NewGeoHashGridBucket() *GeoHashGridBucket { r := &GeoHashGridBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/geohashlocation.go b/typedapi/types/geohashlocation.go index 5a94e62f1a..bb3dfb8d2d 100644 --- a/typedapi/types/geohashlocation.go +++ b/typedapi/types/geohashlocation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // GeoHashLocation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Geo.ts#L131-L133 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Geo.ts#L131-L133 type GeoHashLocation struct { Geohash string `json:"geohash"` } @@ -66,3 +66,13 @@ func NewGeoHashLocation() *GeoHashLocation { return r } + +// true + +type GeoHashLocationVariant interface { + GeoHashLocationCaster() *GeoHashLocation +} + +func (s *GeoHashLocation) GeoHashLocationCaster() *GeoHashLocation { + return s +} diff --git a/typedapi/types/geohashprecision.go b/typedapi/types/geohashprecision.go index d217d329bc..4951cf52ca 100644 --- a/typedapi/types/geohashprecision.go +++ b/typedapi/types/geohashprecision.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // int // string // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Geo.ts#L86-L90 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Geo.ts#L86-L90 type GeoHashPrecision any + +type GeoHashPrecisionVariant interface { + GeoHashPrecisionCaster() *GeoHashPrecision +} diff --git a/typedapi/types/geohexgridaggregate.go b/typedapi/types/geohexgridaggregate.go index d53747acc6..9ec1c39af1 100644 --- a/typedapi/types/geohexgridaggregate.go +++ b/typedapi/types/geohexgridaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // GeoHexGridAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L526-L527 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L585-L586 type GeoHexGridAggregate struct { Buckets BucketsGeoHexGridBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewGeoHexGridAggregate() *GeoHexGridAggregate { return r } + +// false diff --git a/typedapi/types/geohexgridaggregation.go b/typedapi/types/geohexgridaggregation.go index 31740bd282..5cc3c10dcd 100644 --- a/typedapi/types/geohexgridaggregation.go +++ b/typedapi/types/geohexgridaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GeohexGridAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L462-L487 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L479-L504 type GeohexGridAggregation struct { // Bounds Bounding box used to filter the geo-points in each bucket. 
Bounds GeoBounds `json:"bounds,omitempty"` @@ -189,3 +189,13 @@ func NewGeohexGridAggregation() *GeohexGridAggregation { return r } + +// true + +type GeohexGridAggregationVariant interface { + GeohexGridAggregationCaster() *GeohexGridAggregation +} + +func (s *GeohexGridAggregation) GeohexGridAggregationCaster() *GeohexGridAggregation { + return s +} diff --git a/typedapi/types/geohexgridbucket.go b/typedapi/types/geohexgridbucket.go index 4b9280fd3d..86c7d58b45 100644 --- a/typedapi/types/geohexgridbucket.go +++ b/typedapi/types/geohexgridbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // GeoHexGridBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L529-L531 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L588-L590 type GeoHexGridBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -491,6 +491,13 @@ func (s *GeoHexGridBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -624,8 +631,10 @@ func (s GeoHexGridBucket) MarshalJSON() ([]byte, error) { // NewGeoHexGridBucket returns a GeoHexGridBucket. 
func NewGeoHexGridBucket() *GeoHexGridBucket { r := &GeoHexGridBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/geoipdatabaseconfigurationmetadata.go b/typedapi/types/geoipdatabaseconfigurationmetadata.go new file mode 100644 index 0000000000..6c4200eb7b --- /dev/null +++ b/typedapi/types/geoipdatabaseconfigurationmetadata.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoipDatabaseConfigurationMetadata type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/get_geoip_database/GetGeoipDatabaseResponse.ts#L29-L34 +type GeoipDatabaseConfigurationMetadata struct { + Database DatabaseConfiguration `json:"database"` + Id string `json:"id"` + ModifiedDateMillis int64 `json:"modified_date_millis"` + Version int64 `json:"version"` +} + +func (s *GeoipDatabaseConfigurationMetadata) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "database": + if err := dec.Decode(&s.Database); err != nil { + return fmt.Errorf("%s | %w", "Database", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "modified_date_millis": + if err := dec.Decode(&s.ModifiedDateMillis); err != nil { + return fmt.Errorf("%s | %w", "ModifiedDateMillis", err) + } + + case "version": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + s.Version = value + case float64: + f := int64(v) + s.Version = f + } + + } + } + return nil +} + +// NewGeoipDatabaseConfigurationMetadata returns a GeoipDatabaseConfigurationMetadata. +func NewGeoipDatabaseConfigurationMetadata() *GeoipDatabaseConfigurationMetadata { + r := &GeoipDatabaseConfigurationMetadata{} + + return r +} + +// false diff --git a/typedapi/types/geoipdownloadstatistics.go b/typedapi/types/geoipdownloadstatistics.go index e467049da6..48dea405e5 100644 --- a/typedapi/types/geoipdownloadstatistics.go +++ b/typedapi/types/geoipdownloadstatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,10 +31,12 @@ import ( // GeoIpDownloadStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/geo_ip_stats/types.ts#L24-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/geo_ip_stats/types.ts#L24-L37 type GeoIpDownloadStatistics struct { - // DatabaseCount Current number of databases available for use. - DatabaseCount int `json:"database_count"` + // DatabasesCount Current number of databases available for use. + DatabasesCount int `json:"databases_count"` + // ExpiredDatabases Total number of databases not updated after 30 days + ExpiredDatabases int `json:"expired_databases"` // FailedDownloads Total number of failed database downloads. FailedDownloads int `json:"failed_downloads"` // SkippedUpdates Total number of database updates skipped. 
@@ -60,7 +62,7 @@ func (s *GeoIpDownloadStatistics) UnmarshalJSON(data []byte) error { switch t { - case "database_count": + case "databases_count": var tmp any dec.Decode(&tmp) @@ -68,12 +70,28 @@ func (s *GeoIpDownloadStatistics) UnmarshalJSON(data []byte) error { case string: value, err := strconv.Atoi(v) if err != nil { - return fmt.Errorf("%s | %w", "DatabaseCount", err) + return fmt.Errorf("%s | %w", "DatabasesCount", err) } - s.DatabaseCount = value + s.DatabasesCount = value case float64: f := int(v) - s.DatabaseCount = f + s.DatabasesCount = f + } + + case "expired_databases": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ExpiredDatabases", err) + } + s.ExpiredDatabases = value + case float64: + f := int(v) + s.ExpiredDatabases = f } case "failed_downloads": @@ -140,3 +158,5 @@ func NewGeoIpDownloadStatistics() *GeoIpDownloadStatistics { return r } + +// false diff --git a/typedapi/types/geoipnodedatabasename.go b/typedapi/types/geoipnodedatabasename.go index b83120a0e0..e267b606bd 100644 --- a/typedapi/types/geoipnodedatabasename.go +++ b/typedapi/types/geoipnodedatabasename.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // GeoIpNodeDatabaseName type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/geo_ip_stats/types.ts#L45-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/geo_ip_stats/types.ts#L47-L50 type GeoIpNodeDatabaseName struct { // Name Name of the database. 
Name string `json:"name"` @@ -67,3 +67,5 @@ func NewGeoIpNodeDatabaseName() *GeoIpNodeDatabaseName { return r } + +// false diff --git a/typedapi/types/geoipnodedatabases.go b/typedapi/types/geoipnodedatabases.go index 7fd97ae0d7..bb1bd5cf98 100644 --- a/typedapi/types/geoipnodedatabases.go +++ b/typedapi/types/geoipnodedatabases.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // GeoIpNodeDatabases type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/geo_ip_stats/types.ts#L37-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/geo_ip_stats/types.ts#L39-L45 type GeoIpNodeDatabases struct { // Databases Downloaded databases for the node. Databases []GeoIpNodeDatabaseName `json:"databases"` @@ -38,3 +38,5 @@ func NewGeoIpNodeDatabases() *GeoIpNodeDatabases { return r } + +// false diff --git a/typedapi/types/geoipprocessor.go b/typedapi/types/geoipprocessor.go index 24ec7f3f2f..1e32af3043 100644 --- a/typedapi/types/geoipprocessor.go +++ b/typedapi/types/geoipprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GeoIpProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L339-L368 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L442-L476 type GeoIpProcessor struct { // DatabaseFile The database filename referring to a database the module ships with // (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom @@ -40,6 +40,11 @@ type GeoIpProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. Description *string `json:"description,omitempty"` + // DownloadDatabaseOnPipelineCreation If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the + // missing database is downloaded when the pipeline is created. + // Else, the download is triggered by when the pipeline is used as the + // `default_pipeline` or `final_pipeline` in an index. + DownloadDatabaseOnPipelineCreation *bool `json:"download_database_on_pipeline_creation,omitempty"` // Field The field to get the ip address from for the geographical lookup. 
Field string `json:"field"` // FirstOnly If `true`, only the first found geoip data will be returned, even if the @@ -104,6 +109,20 @@ func (s *GeoIpProcessor) UnmarshalJSON(data []byte) error { } s.Description = &o + case "download_database_on_pipeline_creation": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DownloadDatabaseOnPipelineCreation", err) + } + s.DownloadDatabaseOnPipelineCreation = &value + case bool: + s.DownloadDatabaseOnPipelineCreation = &v + } + case "field": if err := dec.Decode(&s.Field); err != nil { return fmt.Errorf("%s | %w", "Field", err) @@ -201,3 +220,13 @@ func NewGeoIpProcessor() *GeoIpProcessor { return r } + +// true + +type GeoIpProcessorVariant interface { + GeoIpProcessorCaster() *GeoIpProcessor +} + +func (s *GeoIpProcessor) GeoIpProcessorCaster() *GeoIpProcessor { + return s +} diff --git a/typedapi/types/geoline.go b/typedapi/types/geoline.go index 0bc445326d..9c8b87e561 100644 --- a/typedapi/types/geoline.go +++ b/typedapi/types/geoline.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GeoLine type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Geo.ts#L56-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Geo.ts#L56-L62 type GeoLine struct { // Coordinates Array of `[lon, lat]` coordinates Coordinates [][]Float64 `json:"coordinates"` @@ -82,3 +82,5 @@ func NewGeoLine() *GeoLine { return r } + +// false diff --git a/typedapi/types/geolineaggregate.go b/typedapi/types/geolineaggregate.go index 07b1a7cb46..fac23a60c0 100644 --- a/typedapi/types/geolineaggregate.go +++ b/typedapi/types/geolineaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GeoLineAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L791-L798 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L902-L912 type GeoLineAggregate struct { Geometry GeoLine `json:"geometry"` Meta Metadata `json:"meta,omitempty"` @@ -92,3 +92,5 @@ func NewGeoLineAggregate() *GeoLineAggregate { return r } + +// false diff --git a/typedapi/types/geolineaggregation.go b/typedapi/types/geolineaggregation.go index b39c387311..e6088bc6d6 100644 --- a/typedapi/types/geolineaggregation.go +++ b/typedapi/types/geolineaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // GeoLineAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L121-L146 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L124-L149 type GeoLineAggregation struct { // IncludeSort When `true`, returns an additional array of the sort values in the feature // properties. @@ -123,3 +123,13 @@ func NewGeoLineAggregation() *GeoLineAggregation { return r } + +// true + +type GeoLineAggregationVariant interface { + GeoLineAggregationCaster() *GeoLineAggregation +} + +func (s *GeoLineAggregation) GeoLineAggregationCaster() *GeoLineAggregation { + return s +} diff --git a/typedapi/types/geolinepoint.go b/typedapi/types/geolinepoint.go index e5f15c910f..757e0969c4 100644 --- a/typedapi/types/geolinepoint.go +++ b/typedapi/types/geolinepoint.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // GeoLinePoint type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L155-L160 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L158-L163 type GeoLinePoint struct { // Field The name of the geo_point field. 
Field string `json:"field"` @@ -67,3 +67,13 @@ func NewGeoLinePoint() *GeoLinePoint { return r } + +// true + +type GeoLinePointVariant interface { + GeoLinePointCaster() *GeoLinePoint +} + +func (s *GeoLinePoint) GeoLinePointCaster() *GeoLinePoint { + return s +} diff --git a/typedapi/types/geolinesort.go b/typedapi/types/geolinesort.go index 4116dc16a3..a2c40c6c17 100644 --- a/typedapi/types/geolinesort.go +++ b/typedapi/types/geolinesort.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // GeoLineSort type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L148-L153 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L151-L156 type GeoLineSort struct { // Field The name of the numeric field to use as the sort key for ordering the points. Field string `json:"field"` @@ -67,3 +67,13 @@ func NewGeoLineSort() *GeoLineSort { return r } + +// true + +type GeoLineSortVariant interface { + GeoLineSortCaster() *GeoLineSort +} + +func (s *GeoLineSort) GeoLineSortCaster() *GeoLineSort { + return s +} diff --git a/typedapi/types/geolocation.go b/typedapi/types/geolocation.go index ab69019fb0..10f66321b0 100644 --- a/typedapi/types/geolocation.go +++ b/typedapi/types/geolocation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -27,5 +27,9 @@ package types // []Float64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Geo.ts#L104-L118 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Geo.ts#L104-L118 type GeoLocation any + +type GeoLocationVariant interface { + GeoLocationCaster() *GeoLocation +} diff --git a/typedapi/types/geopointproperty.go b/typedapi/types/geopointproperty.go index 4a661c119f..34a8f59e4e 100644 --- a/typedapi/types/geopointproperty.go +++ b/typedapi/types/geopointproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,11 +30,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // GeoPointProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/geo.ts#L24-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/geo.ts#L24-L32 type GeoPointProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -45,14 +46,14 @@ type GeoPointProperty struct { IgnoreZValue *bool `json:"ignore_z_value,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue GeoLocation `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue GeoLocation `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { @@ -124,301 +125,313 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := 
NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := 
NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": 
oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -559,301 +572,313 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := 
NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil 
{ - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -864,18 +889,6 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Script", err) } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var 
tmp any dec.Decode(&tmp) @@ -890,6 +903,11 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -904,22 +922,22 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { func (s GeoPointProperty) MarshalJSON() ([]byte, error) { type innerGeoPointProperty GeoPointProperty tmp := innerGeoPointProperty{ - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - IgnoreZValue: s.IgnoreZValue, - Index: s.Index, - Meta: s.Meta, - NullValue: s.NullValue, - OnScriptError: s.OnScriptError, - Properties: s.Properties, - Script: s.Script, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "geo_point" @@ -930,10 +948,20 @@ func (s GeoPointProperty) MarshalJSON() ([]byte, error) { // NewGeoPointProperty returns a GeoPointProperty. 
func NewGeoPointProperty() *GeoPointProperty { r := &GeoPointProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type GeoPointPropertyVariant interface { + GeoPointPropertyCaster() *GeoPointProperty +} + +func (s *GeoPointProperty) GeoPointPropertyCaster() *GeoPointProperty { + return s +} diff --git a/typedapi/types/geopolygonpoints.go b/typedapi/types/geopolygonpoints.go index 47ccd5b46c..a59d12b300 100644 --- a/typedapi/types/geopolygonpoints.go +++ b/typedapi/types/geopolygonpoints.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // GeoPolygonPoints type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/geo.ts#L93-L95 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/geo.ts#L105-L107 type GeoPolygonPoints struct { Points []GeoLocation `json:"points"` } @@ -33,3 +33,13 @@ func NewGeoPolygonPoints() *GeoPolygonPoints { return r } + +// true + +type GeoPolygonPointsVariant interface { + GeoPolygonPointsCaster() *GeoPolygonPoints +} + +func (s *GeoPolygonPoints) GeoPolygonPointsCaster() *GeoPolygonPoints { + return s +} diff --git a/typedapi/types/geopolygonquery.go b/typedapi/types/geopolygonquery.go index d25c8943cd..ac32e2998a 100644 --- a/typedapi/types/geopolygonquery.go +++ b/typedapi/types/geopolygonquery.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // GeoPolygonQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/geo.ts#L97-L108 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/geo.ts#L109-L121 type GeoPolygonQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -159,8 +159,18 @@ func (s GeoPolygonQuery) MarshalJSON() ([]byte, error) { // NewGeoPolygonQuery returns a GeoPolygonQuery. func NewGeoPolygonQuery() *GeoPolygonQuery { r := &GeoPolygonQuery{ - GeoPolygonQuery: make(map[string]GeoPolygonPoints, 0), + GeoPolygonQuery: make(map[string]GeoPolygonPoints), } return r } + +// true + +type GeoPolygonQueryVariant interface { + GeoPolygonQueryCaster() *GeoPolygonQuery +} + +func (s *GeoPolygonQuery) GeoPolygonQueryCaster() *GeoPolygonQuery { + return s +} diff --git a/typedapi/types/georesults.go b/typedapi/types/georesults.go index ae12c2d21c..c0c2ab1705 100644 --- a/typedapi/types/georesults.go +++ b/typedapi/types/georesults.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,12 +31,12 @@ import ( // GeoResults type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Anomaly.ts#L145-L154 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Anomaly.ts#L146-L155 type GeoResults struct { // ActualPoint The actual value for the bucket formatted as a `geo_point`. - ActualPoint string `json:"actual_point"` + ActualPoint *string `json:"actual_point,omitempty"` // TypicalPoint The typical value for the bucket formatted as a `geo_point`. - TypicalPoint string `json:"typical_point"` + TypicalPoint *string `json:"typical_point,omitempty"` } func (s *GeoResults) UnmarshalJSON(data []byte) error { @@ -64,7 +64,7 @@ func (s *GeoResults) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.ActualPoint = o + s.ActualPoint = &o case "typical_point": var tmp json.RawMessage @@ -76,7 +76,7 @@ func (s *GeoResults) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.TypicalPoint = o + s.TypicalPoint = &o } } @@ -89,3 +89,5 @@ func NewGeoResults() *GeoResults { return r } + +// false diff --git a/typedapi/types/geoshapefieldquery.go b/typedapi/types/geoshapefieldquery.go index f2033b7ef0..1d01ebcf87 100644 --- a/typedapi/types/geoshapefieldquery.go +++ b/typedapi/types/geoshapefieldquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // GeoShapeFieldQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/geo.ts#L115-L126 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/geo.ts#L128-L139 type GeoShapeFieldQuery struct { // IndexedShape Query using an indexed shape retrieved from the the specified document and // path. @@ -83,3 +83,13 @@ func NewGeoShapeFieldQuery() *GeoShapeFieldQuery { return r } + +// true + +type GeoShapeFieldQueryVariant interface { + GeoShapeFieldQueryCaster() *GeoShapeFieldQuery +} + +func (s *GeoShapeFieldQuery) GeoShapeFieldQueryCaster() *GeoShapeFieldQuery { + return s +} diff --git a/typedapi/types/geoshapeproperty.go b/typedapi/types/geoshapeproperty.go index 23769cb39f..2d3863acff 100644 --- a/typedapi/types/geoshapeproperty.go +++ b/typedapi/types/geoshapeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,11 +31,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geostrategy" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // GeoShapeProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/geo.ts#L41-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/geo.ts#L41-L54 type GeoShapeProperty struct { Coerce *bool `json:"coerce,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -46,13 +47,13 @@ type GeoShapeProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` IgnoreZValue *bool `json:"ignore_z_value,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Orientation *geoorientation.GeoOrientation `json:"orientation,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Strategy *geostrategy.GeoStrategy `json:"strategy,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Orientation *geoorientation.GeoOrientation `json:"orientation,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + Strategy *geostrategy.GeoStrategy `json:"strategy,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { @@ -138,301 +139,313 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() 
if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := 
NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -514,318 +527,318 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := 
NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil 
{ + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -845,6 +858,11 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Strategy", err) } + case "synthetic_source_keep": + if 
err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -859,21 +877,21 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { func (s GeoShapeProperty) MarshalJSON() ([]byte, error) { type innerGeoShapeProperty GeoShapeProperty tmp := innerGeoShapeProperty{ - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - IgnoreZValue: s.IgnoreZValue, - Meta: s.Meta, - Orientation: s.Orientation, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Strategy: s.Strategy, - Type: s.Type, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Meta: s.Meta, + Orientation: s.Orientation, + Properties: s.Properties, + Store: s.Store, + Strategy: s.Strategy, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "geo_shape" @@ -884,10 +902,20 @@ func (s GeoShapeProperty) MarshalJSON() ([]byte, error) { // NewGeoShapeProperty returns a GeoShapeProperty. 
func NewGeoShapeProperty() *GeoShapeProperty { r := &GeoShapeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type GeoShapePropertyVariant interface { + GeoShapePropertyCaster() *GeoShapeProperty +} + +func (s *GeoShapeProperty) GeoShapePropertyCaster() *GeoShapeProperty { + return s +} diff --git a/typedapi/types/geoshapequery.go b/typedapi/types/geoshapequery.go index c5b51c16b3..8c3d3d5cfe 100644 --- a/typedapi/types/geoshapequery.go +++ b/typedapi/types/geoshapequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GeoShapeQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/geo.ts#L128-L143 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/geo.ts#L141-L157 type GeoShapeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -154,8 +154,18 @@ func (s GeoShapeQuery) MarshalJSON() ([]byte, error) { // NewGeoShapeQuery returns a GeoShapeQuery. 
func NewGeoShapeQuery() *GeoShapeQuery { r := &GeoShapeQuery{ - GeoShapeQuery: make(map[string]GeoShapeFieldQuery, 0), + GeoShapeQuery: make(map[string]GeoShapeFieldQuery), } return r } + +// true + +type GeoShapeQueryVariant interface { + GeoShapeQueryCaster() *GeoShapeQuery +} + +func (s *GeoShapeQuery) GeoShapeQueryCaster() *GeoShapeQuery { + return s +} diff --git a/typedapi/types/geotilegridaggregate.go b/typedapi/types/geotilegridaggregate.go index 14f0f9815b..20203ce310 100644 --- a/typedapi/types/geotilegridaggregate.go +++ b/typedapi/types/geotilegridaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // GeoTileGridAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L518-L520 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L574-L579 type GeoTileGridAggregate struct { Buckets BucketsGeoTileGridBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewGeoTileGridAggregate() *GeoTileGridAggregate { return r } + +// false diff --git a/typedapi/types/geotilegridaggregation.go b/typedapi/types/geotilegridaggregation.go index f15cd87c05..fac566c9d4 100644 --- a/typedapi/types/geotilegridaggregation.go +++ b/typedapi/types/geotilegridaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GeoTileGridAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L434-L460 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L451-L477 type GeoTileGridAggregation struct { // Bounds A bounding box to filter the geo-points or geo-shapes in each bucket. Bounds GeoBounds `json:"bounds,omitempty"` @@ -181,3 +181,13 @@ func NewGeoTileGridAggregation() *GeoTileGridAggregation { return r } + +// true + +type GeoTileGridAggregationVariant interface { + GeoTileGridAggregationCaster() *GeoTileGridAggregation +} + +func (s *GeoTileGridAggregation) GeoTileGridAggregationCaster() *GeoTileGridAggregation { + return s +} diff --git a/typedapi/types/geotilegridbucket.go b/typedapi/types/geotilegridbucket.go index 651e4c8543..24f9aeadf7 100644 --- a/typedapi/types/geotilegridbucket.go +++ b/typedapi/types/geotilegridbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // GeoTileGridBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L522-L524 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L581-L583 type GeoTileGridBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -491,6 +491,13 @@ func (s *GeoTileGridBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -624,8 +631,10 @@ func (s GeoTileGridBucket) MarshalJSON() ([]byte, error) { // NewGeoTileGridBucket returns a GeoTileGridBucket. func NewGeoTileGridBucket() *GeoTileGridBucket { r := &GeoTileGridBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/germananalyzer.go b/typedapi/types/germananalyzer.go new file mode 100644 index 0000000000..04e9816ee6 --- /dev/null +++ b/typedapi/types/germananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GermanAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L166-L171 +type GermanAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *GermanAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case 
"stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s GermanAnalyzer) MarshalJSON() ([]byte, error) { + type innerGermanAnalyzer GermanAnalyzer + tmp := innerGermanAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "german" + + return json.Marshal(tmp) +} + +// NewGermanAnalyzer returns a GermanAnalyzer. +func NewGermanAnalyzer() *GermanAnalyzer { + r := &GermanAnalyzer{} + + return r +} + +// true + +type GermanAnalyzerVariant interface { + GermanAnalyzerCaster() *GermanAnalyzer +} + +func (s *GermanAnalyzer) GermanAnalyzerCaster() *GermanAnalyzer { + return s +} diff --git a/typedapi/types/getmigrationfeature.go b/typedapi/types/getmigrationfeature.go index af82de81a7..32a673c9bd 100644 --- a/typedapi/types/getmigrationfeature.go +++ b/typedapi/types/getmigrationfeature.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // GetMigrationFeature type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L37-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L37-L42 type GetMigrationFeature struct { FeatureName string `json:"feature_name"` Indices []MigrationFeatureIndexInfo `json:"indices"` @@ -94,3 +94,5 @@ func NewGetMigrationFeature() *GetMigrationFeature { return r } + +// false diff --git a/typedapi/types/getresult.go b/typedapi/types/getresult.go index 436d1d2b23..36d3a48aac 100644 --- a/typedapi/types/getresult.go +++ b/typedapi/types/getresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,17 +31,32 @@ import ( // GetResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/get/types.ts#L25-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/get/types.ts#L25-L67 type GetResult struct { - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Found bool `json:"found"` - Id_ string `json:"_id"` - Index_ string `json:"_index"` - PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - Routing_ *string `json:"_routing,omitempty"` - SeqNo_ *int64 `json:"_seq_no,omitempty"` - Source_ json.RawMessage `json:"_source,omitempty"` - Version_ *int64 `json:"_version,omitempty"` + // Fields If the `stored_fields` parameter is set to `true` and `found` is `true`, it + // contains the document fields stored in the index. + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Found Indicates whether the document exists. + Found bool `json:"found"` + // Id_ The unique identifier for the document. + Id_ string `json:"_id"` + Ignored_ []string `json:"_ignored,omitempty"` + // Index_ The name of the index the document belongs to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Routing_ The explicit routing, if set. + Routing_ *string `json:"_routing,omitempty"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Source_ If `found` is `true`, it contains the document data formatted in JSON. + // If the `_source` parameter is set to `false` or the `stored_fields` parameter + // is set to `true`, it is excluded. 
+ Source_ json.RawMessage `json:"_source,omitempty"` + // Version_ The document version, which is incremented each time the document is updated. + Version_ *int64 `json:"_version,omitempty"` } func (s *GetResult) UnmarshalJSON(data []byte) error { @@ -86,6 +101,11 @@ func (s *GetResult) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Id_", err) } + case "_ignored": + if err := dec.Decode(&s.Ignored_); err != nil { + return fmt.Errorf("%s | %w", "Ignored_", err) + } + case "_index": if err := dec.Decode(&s.Index_); err != nil { return fmt.Errorf("%s | %w", "Index_", err) @@ -141,8 +161,10 @@ func (s *GetResult) UnmarshalJSON(data []byte) error { // NewGetResult returns a GetResult. func NewGetResult() *GetResult { r := &GetResult{ - Fields: make(map[string]json.RawMessage, 0), + Fields: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/getscriptcontext.go b/typedapi/types/getscriptcontext.go index 9b868dae00..3ddb7ab20d 100644 --- a/typedapi/types/getscriptcontext.go +++ b/typedapi/types/getscriptcontext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // GetScriptContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/get_script_context/types.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/get_script_context/types.ts#L22-L25 type GetScriptContext struct { Methods []ContextMethod `json:"methods"` Name string `json:"name"` @@ -72,3 +72,5 @@ func NewGetScriptContext() *GetScriptContext { return r } + +// false diff --git a/typedapi/types/getstats.go b/typedapi/types/getstats.go index 4764b3a030..5892f5430c 100644 --- a/typedapi/types/getstats.go +++ b/typedapi/types/getstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GetStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L130-L141 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L133-L144 type GetStats struct { Current int64 `json:"current"` ExistsTime Duration `json:"exists_time,omitempty"` @@ -161,3 +161,5 @@ func NewGetStats() *GetStats { return r } + +// false diff --git a/typedapi/types/getuserprofileerrors.go b/typedapi/types/getuserprofileerrors.go index 12fae27266..0aa237333d 100644 --- a/typedapi/types/getuserprofileerrors.go +++ b/typedapi/types/getuserprofileerrors.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GetUserProfileErrors type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_user_profile/types.ts#L25-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_user_profile/types.ts#L25-L28 type GetUserProfileErrors struct { Count int64 `json:"count"` Details map[string]ErrorCause `json:"details"` @@ -83,8 +83,10 @@ func (s *GetUserProfileErrors) UnmarshalJSON(data []byte) error { // NewGetUserProfileErrors returns a GetUserProfileErrors. func NewGetUserProfileErrors() *GetUserProfileErrors { r := &GetUserProfileErrors{ - Details: make(map[string]ErrorCause, 0), + Details: make(map[string]ErrorCause), } return r } + +// false diff --git a/typedapi/types/globalaggregate.go b/typedapi/types/globalaggregate.go index 1499d0bae7..19f62f3a4f 100644 --- a/typedapi/types/globalaggregate.go +++ b/typedapi/types/globalaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // GlobalAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L496-L497 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L546-L550 type GlobalAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -491,6 +491,13 @@ func (s *GlobalAggregate) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -624,8 +631,10 @@ func (s GlobalAggregate) MarshalJSON() ([]byte, error) { // NewGlobalAggregate returns a GlobalAggregate. func NewGlobalAggregate() *GlobalAggregate { r := &GlobalAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/globalaggregation.go b/typedapi/types/globalaggregation.go index db509dc2a8..927b7217d0 100644 --- a/typedapi/types/globalaggregation.go +++ b/typedapi/types/globalaggregation.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // GlobalAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L489-L489 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L506-L506 type GlobalAggregation struct { } @@ -32,3 +32,13 @@ func NewGlobalAggregation() *GlobalAggregation { return r } + +// true + +type GlobalAggregationVariant interface { + GlobalAggregationCaster() *GlobalAggregation +} + +func (s *GlobalAggregation) GlobalAggregationCaster() *GlobalAggregation { + return s +} diff --git a/typedapi/types/globalprivilege.go b/typedapi/types/globalprivilege.go index 2d6e11c05e..951da2bc5a 100644 --- a/typedapi/types/globalprivilege.go +++ b/typedapi/types/globalprivilege.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // GlobalPrivilege type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/Privileges.ts#L336-L338 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L430-L432 type GlobalPrivilege struct { Application ApplicationGlobalUserPrivileges `json:"application"` } @@ -33,3 +33,13 @@ func NewGlobalPrivilege() *GlobalPrivilege { return r } + +// true + +type GlobalPrivilegeVariant interface { + GlobalPrivilegeCaster() *GlobalPrivilege +} + +func (s *GlobalPrivilege) GlobalPrivilegeCaster() *GlobalPrivilege { + return s +} diff --git a/typedapi/types/googlenormalizeddistanceheuristic.go b/typedapi/types/googlenormalizeddistanceheuristic.go index c319570802..d229b72aa3 100644 --- a/typedapi/types/googlenormalizeddistanceheuristic.go +++ b/typedapi/types/googlenormalizeddistanceheuristic.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GoogleNormalizedDistanceHeuristic type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L748-L753 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L793-L798 type GoogleNormalizedDistanceHeuristic struct { // BackgroundIsSuperset Set to `false` if you defined a custom background filter that represents a // different set of documents that you want to compare to. 
@@ -78,3 +78,13 @@ func NewGoogleNormalizedDistanceHeuristic() *GoogleNormalizedDistanceHeuristic { return r } + +// true + +type GoogleNormalizedDistanceHeuristicVariant interface { + GoogleNormalizedDistanceHeuristicCaster() *GoogleNormalizedDistanceHeuristic +} + +func (s *GoogleNormalizedDistanceHeuristic) GoogleNormalizedDistanceHeuristicCaster() *GoogleNormalizedDistanceHeuristic { + return s +} diff --git a/typedapi/types/grantapikey.go b/typedapi/types/grantapikey.go index fa6e523c0e..830d62f495 100644 --- a/typedapi/types/grantapikey.go +++ b/typedapi/types/grantapikey.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // GrantApiKey type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/grant_api_key/types.ts#L25-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/grant_api_key/types.ts#L25-L45 type GrantApiKey struct { // Expiration Expiration time for the API key. By default, API keys never expire. Expiration *string `json:"expiration,omitempty"` @@ -41,7 +41,6 @@ type GrantApiKey struct { Metadata Metadata `json:"metadata,omitempty"` Name string `json:"name"` // RoleDescriptors The role descriptors for this API key. - // This parameter is optional. // When it is not specified or is an empty array, the API key has a point in // time snapshot of permissions of the specified user or access token. 
// If you supply role descriptors, the resultant permissions are an intersection @@ -111,3 +110,13 @@ func NewGrantApiKey() *GrantApiKey { return r } + +// true + +type GrantApiKeyVariant interface { + GrantApiKeyCaster() *GrantApiKey +} + +func (s *GrantApiKey) GrantApiKeyCaster() *GrantApiKey { + return s +} diff --git a/typedapi/types/greaterthanvalidation.go b/typedapi/types/greaterthanvalidation.go index 931020f7c0..ce2f40d9ec 100644 --- a/typedapi/types/greaterthanvalidation.go +++ b/typedapi/types/greaterthanvalidation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GreaterThanValidation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L63-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L63-L66 type GreaterThanValidation struct { Constraint Float64 `json:"constraint"` Type string `json:"type,omitempty"` @@ -97,3 +97,13 @@ func NewGreaterThanValidation() *GreaterThanValidation { return r } + +// true + +type GreaterThanValidationVariant interface { + GreaterThanValidationCaster() *GreaterThanValidation +} + +func (s *GreaterThanValidation) GreaterThanValidationCaster() *GreaterThanValidation { + return s +} diff --git a/typedapi/types/greekanalyzer.go b/typedapi/types/greekanalyzer.go new file mode 100644 index 0000000000..c51a6d33cf --- /dev/null +++ b/typedapi/types/greekanalyzer.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GreekAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L173-L177 +type GreekAnalyzer struct { + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *GreekAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s GreekAnalyzer) MarshalJSON() ([]byte, error) { + type innerGreekAnalyzer GreekAnalyzer + tmp := innerGreekAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "greek" + + return json.Marshal(tmp) +} + +// NewGreekAnalyzer returns a GreekAnalyzer. 
+func NewGreekAnalyzer() *GreekAnalyzer { + r := &GreekAnalyzer{} + + return r +} + +// true + +type GreekAnalyzerVariant interface { + GreekAnalyzerCaster() *GreekAnalyzer +} + +func (s *GreekAnalyzer) GreekAnalyzerCaster() *GreekAnalyzer { + return s +} diff --git a/typedapi/types/grokprocessor.go b/typedapi/types/grokprocessor.go index a2ff9f93fe..d7ffaa8bb4 100644 --- a/typedapi/types/grokprocessor.go +++ b/typedapi/types/grokprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,11 +31,14 @@ import ( // GrokProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L672-L697 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L950-L981 type GrokProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. Description *string `json:"description,omitempty"` + // EcsCompatibility Must be disabled or v1. If v1, the processor uses patterns with Elastic + // Common Schema (ECS) field names. + EcsCompatibility *string `json:"ecs_compatibility,omitempty"` // Field The field to use for grok expression parsing. Field string `json:"field"` // If Conditionally execute the processor. 
@@ -90,6 +93,18 @@ func (s *GrokProcessor) UnmarshalJSON(data []byte) error { } s.Description = &o + case "ecs_compatibility": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "EcsCompatibility", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.EcsCompatibility = &o + case "field": if err := dec.Decode(&s.Field); err != nil { return fmt.Errorf("%s | %w", "Field", err) @@ -187,8 +202,18 @@ func (s *GrokProcessor) UnmarshalJSON(data []byte) error { // NewGrokProcessor returns a GrokProcessor. func NewGrokProcessor() *GrokProcessor { r := &GrokProcessor{ - PatternDefinitions: make(map[string]string, 0), + PatternDefinitions: make(map[string]string), } return r } + +// true + +type GrokProcessorVariant interface { + GrokProcessorCaster() *GrokProcessor +} + +func (s *GrokProcessor) GrokProcessorCaster() *GrokProcessor { + return s +} diff --git a/typedapi/types/groupings.go b/typedapi/types/groupings.go index d7ac5d49f7..18b4e3dac6 100644 --- a/typedapi/types/groupings.go +++ b/typedapi/types/groupings.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Groupings type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/_types/Groupings.ts#L24-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/_types/Groupings.ts#L24-L40 type Groupings struct { // DateHistogram A date histogram group aggregates a date field into time-based buckets. 
// This group is mandatory; you currently cannot roll up documents without a @@ -45,3 +45,13 @@ func NewGroupings() *Groupings { return r } + +// true + +type GroupingsVariant interface { + GroupingsCaster() *Groupings +} + +func (s *Groupings) GroupingsCaster() *Groupings { + return s +} diff --git a/typedapi/types/gsubprocessor.go b/typedapi/types/gsubprocessor.go index 1397e83b30..fddac61c17 100644 --- a/typedapi/types/gsubprocessor.go +++ b/typedapi/types/gsubprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // GsubProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L699-L723 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L983-L1007 type GsubProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -188,3 +188,13 @@ func NewGsubProcessor() *GsubProcessor { return r } + +// true + +type GsubProcessorVariant interface { + GsubProcessorCaster() *GsubProcessor +} + +func (s *GsubProcessor) GsubProcessorCaster() *GsubProcessor { + return s +} diff --git a/typedapi/types/halffloatnumberproperty.go b/typedapi/types/halffloatnumberproperty.go index 25676d2344..8bb401e81e 100644 --- a/typedapi/types/halffloatnumberproperty.go +++ b/typedapi/types/halffloatnumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // HalfFloatNumberProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L147-L150 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L151-L154 type HalfFloatNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,13 +48,13 @@ type HalfFloatNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - NullValue *float32 `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *float32 `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -162,301 +163,313 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() 
if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -554,301 +567,313 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := 
NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -859,18 +884,6 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Script", err) } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -885,6 +898,11 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -932,8 +950,8 @@ func (s HalfFloatNumberProperty) MarshalJSON() ([]byte, error) { OnScriptError: s.OnScriptError, Properties: s.Properties, Script: s.Script, - Similarity: s.Similarity, Store: s.Store, + 
SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -947,10 +965,20 @@ func (s HalfFloatNumberProperty) MarshalJSON() ([]byte, error) { // NewHalfFloatNumberProperty returns a HalfFloatNumberProperty. func NewHalfFloatNumberProperty() *HalfFloatNumberProperty { r := &HalfFloatNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type HalfFloatNumberPropertyVariant interface { + HalfFloatNumberPropertyCaster() *HalfFloatNumberProperty +} + +func (s *HalfFloatNumberProperty) HalfFloatNumberPropertyCaster() *HalfFloatNumberProperty { + return s +} diff --git a/typedapi/types/haschildquery.go b/typedapi/types/haschildquery.go index 305084c0f8..b6d0307900 100644 --- a/typedapi/types/haschildquery.go +++ b/typedapi/types/haschildquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // HasChildQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/joining.ts#L41-L76 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/joining.ts#L41-L79 type HasChildQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -59,7 +59,7 @@ type HasChildQuery struct { // Query Query you wish to run on child documents of the `type` field. 
// If a child document matches the search, the query returns the parent // document. - Query *Query `json:"query,omitempty"` + Query Query `json:"query"` QueryName_ *string `json:"_name,omitempty"` // ScoreMode Indicates how scores for matching child documents affect the root parent // document’s relevance score. @@ -188,3 +188,13 @@ func NewHasChildQuery() *HasChildQuery { return r } + +// true + +type HasChildQueryVariant interface { + HasChildQueryCaster() *HasChildQuery +} + +func (s *HasChildQuery) HasChildQueryCaster() *HasChildQuery { + return s +} diff --git a/typedapi/types/hasparentquery.go b/typedapi/types/hasparentquery.go index 67f70d3f4c..7027895c20 100644 --- a/typedapi/types/hasparentquery.go +++ b/typedapi/types/hasparentquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // HasParentQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/joining.ts#L78-L104 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/joining.ts#L81-L110 type HasParentQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -51,7 +51,7 @@ type HasParentQuery struct { // Query Query you wish to run on parent documents of the `parent_type` field. // If a parent document matches the search, the query returns its child // documents. 
- Query *Query `json:"query,omitempty"` + Query Query `json:"query"` QueryName_ *string `json:"_name,omitempty"` // Score Indicates whether the relevance score of a matching parent document is // aggregated into its child documents. @@ -155,3 +155,13 @@ func NewHasParentQuery() *HasParentQuery { return r } + +// true + +type HasParentQueryVariant interface { + HasParentQueryCaster() *HasParentQuery +} + +func (s *HasParentQuery) HasParentQueryCaster() *HasParentQuery { + return s +} diff --git a/typedapi/types/hasprivilegesuserprofileerrors.go b/typedapi/types/hasprivilegesuserprofileerrors.go index 22f14ef8d1..898097fd58 100644 --- a/typedapi/types/hasprivilegesuserprofileerrors.go +++ b/typedapi/types/hasprivilegesuserprofileerrors.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // HasPrivilegesUserProfileErrors type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/has_privileges_user_profile/types.ts#L39-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/has_privileges_user_profile/types.ts#L39-L42 type HasPrivilegesUserProfileErrors struct { Count int64 `json:"count"` Details map[string]ErrorCause `json:"details"` @@ -83,8 +83,10 @@ func (s *HasPrivilegesUserProfileErrors) UnmarshalJSON(data []byte) error { // NewHasPrivilegesUserProfileErrors returns a HasPrivilegesUserProfileErrors. 
func NewHasPrivilegesUserProfileErrors() *HasPrivilegesUserProfileErrors { r := &HasPrivilegesUserProfileErrors{ - Details: make(map[string]ErrorCause, 0), + Details: make(map[string]ErrorCause), } return r } + +// false diff --git a/typedapi/types/hdrmethod.go b/typedapi/types/hdrmethod.go index 0187e3d7b3..9d9ab8ed97 100644 --- a/typedapi/types/hdrmethod.go +++ b/typedapi/types/hdrmethod.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // HdrMethod type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L216-L221 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L225-L230 type HdrMethod struct { // NumberOfSignificantValueDigits Specifies the resolution of values for the histogram in number of significant // digits. @@ -80,3 +80,13 @@ func NewHdrMethod() *HdrMethod { return r } + +// true + +type HdrMethodVariant interface { + HdrMethodCaster() *HdrMethod +} + +func (s *HdrMethod) HdrMethodCaster() *HdrMethod { + return s +} diff --git a/typedapi/types/hdrpercentileranksaggregate.go b/typedapi/types/hdrpercentileranksaggregate.go index 2b62cfa386..6ec063a7d1 100644 --- a/typedapi/types/hdrpercentileranksaggregate.go +++ b/typedapi/types/hdrpercentileranksaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // HdrPercentileRanksAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L169-L170 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L171-L172 type HdrPercentileRanksAggregate struct { Meta Metadata `json:"meta,omitempty"` Values Percentiles `json:"values"` @@ -64,7 +64,7 @@ func (s *HdrPercentileRanksAggregate) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) switch rawMsg[0] { case '{': - o := make(KeyedPercentiles, 0) + o := make(map[string]string, 0) if err := localDec.Decode(&o); err != nil { return fmt.Errorf("%s | %w", "Values", err) } @@ -88,3 +88,5 @@ func NewHdrPercentileRanksAggregate() *HdrPercentileRanksAggregate { return r } + +// false diff --git a/typedapi/types/hdrpercentilesaggregate.go b/typedapi/types/hdrpercentilesaggregate.go index e9566f4033..76b12dc27e 100644 --- a/typedapi/types/hdrpercentilesaggregate.go +++ b/typedapi/types/hdrpercentilesaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // HdrPercentilesAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L166-L167 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L168-L169 type HdrPercentilesAggregate struct { Meta Metadata `json:"meta,omitempty"` Values Percentiles `json:"values"` @@ -64,7 +64,7 @@ func (s *HdrPercentilesAggregate) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) switch rawMsg[0] { case '{': - o := make(KeyedPercentiles, 0) + o := make(map[string]string, 0) if err := localDec.Decode(&o); err != nil { return fmt.Errorf("%s | %w", "Values", err) } @@ -88,3 +88,5 @@ func NewHdrPercentilesAggregate() *HdrPercentilesAggregate { return r } + +// false diff --git a/typedapi/types/healthrecord.go b/typedapi/types/healthrecord.go index 479989dde5..860e269b4e 100644 --- a/typedapi/types/healthrecord.go +++ b/typedapi/types/healthrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // HealthRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/health/types.ts#L23-L94 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/health/types.ts#L23-L99 type HealthRecord struct { // ActiveShardsPercent active number of shards in percent ActiveShardsPercent *string `json:"active_shards_percent,omitempty"` @@ -61,6 +61,8 @@ type HealthRecord struct { Timestamp *string `json:"timestamp,omitempty"` // Unassign number of unassigned shards Unassign *string `json:"unassign,omitempty"` + // UnassignPri number of unassigned primary shards + UnassignPri *string `json:"unassign.pri,omitempty"` } func (s *HealthRecord) UnmarshalJSON(data []byte) error { @@ -232,6 +234,18 @@ func (s *HealthRecord) UnmarshalJSON(data []byte) error { } s.Unassign = &o + case "unassign.pri", "up", "shards.unassigned.primary", "shardsUnassignedPrimary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UnassignPri", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UnassignPri = &o + } } return nil @@ -243,3 +257,5 @@ func NewHealthRecord() *HealthRecord { return r } + +// false diff --git a/typedapi/types/healthresponsebody.go b/typedapi/types/healthresponsebody.go deleted file mode 100644 index 4667bbf9be..0000000000 --- a/typedapi/types/healthresponsebody.go +++ /dev/null @@ -1,309 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/healthstatus" -) - -// HealthResponseBody type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/health/ClusterHealthResponse.ts#L39-L72 -type HealthResponseBody struct { - // ActivePrimaryShards The number of active primary shards. - ActivePrimaryShards int `json:"active_primary_shards"` - // ActiveShards The total number of active primary and replica shards. - ActiveShards int `json:"active_shards"` - // ActiveShardsPercentAsNumber The ratio of active shards in the cluster expressed as a percentage. - ActiveShardsPercentAsNumber Percentage `json:"active_shards_percent_as_number"` - // ClusterName The name of the cluster. - ClusterName string `json:"cluster_name"` - // DelayedUnassignedShards The number of shards whose allocation has been delayed by the timeout - // settings. - DelayedUnassignedShards int `json:"delayed_unassigned_shards"` - Indices map[string]IndexHealthStats `json:"indices,omitempty"` - // InitializingShards The number of shards that are under initialization. - InitializingShards int `json:"initializing_shards"` - // NumberOfDataNodes The number of nodes that are dedicated data nodes. 
- NumberOfDataNodes int `json:"number_of_data_nodes"` - // NumberOfInFlightFetch The number of unfinished fetches. - NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"` - // NumberOfNodes The number of nodes within the cluster. - NumberOfNodes int `json:"number_of_nodes"` - // NumberOfPendingTasks The number of cluster-level changes that have not yet been executed. - NumberOfPendingTasks int `json:"number_of_pending_tasks"` - // RelocatingShards The number of shards that are under relocation. - RelocatingShards int `json:"relocating_shards"` - Status healthstatus.HealthStatus `json:"status"` - // TaskMaxWaitingInQueue The time since the earliest initiated task is waiting for being performed. - TaskMaxWaitingInQueue Duration `json:"task_max_waiting_in_queue,omitempty"` - // TaskMaxWaitingInQueueMillis The time expressed in milliseconds since the earliest initiated task is - // waiting for being performed. - TaskMaxWaitingInQueueMillis int64 `json:"task_max_waiting_in_queue_millis"` - // TimedOut If false the response returned within the period of time that is specified by - // the timeout parameter (30s by default) - TimedOut bool `json:"timed_out"` - // UnassignedShards The number of shards that are not allocated. 
- UnassignedShards int `json:"unassigned_shards"` -} - -func (s *HealthResponseBody) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "active_primary_shards": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "ActivePrimaryShards", err) - } - s.ActivePrimaryShards = value - case float64: - f := int(v) - s.ActivePrimaryShards = f - } - - case "active_shards": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "ActiveShards", err) - } - s.ActiveShards = value - case float64: - f := int(v) - s.ActiveShards = f - } - - case "active_shards_percent_as_number": - if err := dec.Decode(&s.ActiveShardsPercentAsNumber); err != nil { - return fmt.Errorf("%s | %w", "ActiveShardsPercentAsNumber", err) - } - - case "cluster_name": - if err := dec.Decode(&s.ClusterName); err != nil { - return fmt.Errorf("%s | %w", "ClusterName", err) - } - - case "delayed_unassigned_shards": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "DelayedUnassignedShards", err) - } - s.DelayedUnassignedShards = value - case float64: - f := int(v) - s.DelayedUnassignedShards = f - } - - case "indices": - if s.Indices == nil { - s.Indices = make(map[string]IndexHealthStats, 0) - } - if err := dec.Decode(&s.Indices); err != nil { - return fmt.Errorf("%s | %w", "Indices", err) - } - - case "initializing_shards": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "InitializingShards", err) - } - s.InitializingShards = 
value - case float64: - f := int(v) - s.InitializingShards = f - } - - case "number_of_data_nodes": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "NumberOfDataNodes", err) - } - s.NumberOfDataNodes = value - case float64: - f := int(v) - s.NumberOfDataNodes = f - } - - case "number_of_in_flight_fetch": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "NumberOfInFlightFetch", err) - } - s.NumberOfInFlightFetch = value - case float64: - f := int(v) - s.NumberOfInFlightFetch = f - } - - case "number_of_nodes": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "NumberOfNodes", err) - } - s.NumberOfNodes = value - case float64: - f := int(v) - s.NumberOfNodes = f - } - - case "number_of_pending_tasks": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "NumberOfPendingTasks", err) - } - s.NumberOfPendingTasks = value - case float64: - f := int(v) - s.NumberOfPendingTasks = f - } - - case "relocating_shards": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "RelocatingShards", err) - } - s.RelocatingShards = value - case float64: - f := int(v) - s.RelocatingShards = f - } - - case "status": - if err := dec.Decode(&s.Status); err != nil { - return fmt.Errorf("%s | %w", "Status", err) - } - - case "task_max_waiting_in_queue": - if err := dec.Decode(&s.TaskMaxWaitingInQueue); err != nil { - return fmt.Errorf("%s | %w", "TaskMaxWaitingInQueue", err) - } - - case "task_max_waiting_in_queue_millis": - if err := 
dec.Decode(&s.TaskMaxWaitingInQueueMillis); err != nil { - return fmt.Errorf("%s | %w", "TaskMaxWaitingInQueueMillis", err) - } - - case "timed_out": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "TimedOut", err) - } - s.TimedOut = value - case bool: - s.TimedOut = v - } - - case "unassigned_shards": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "UnassignedShards", err) - } - s.UnassignedShards = value - case float64: - f := int(v) - s.UnassignedShards = f - } - - } - } - return nil -} - -// NewHealthResponseBody returns a HealthResponseBody. -func NewHealthResponseBody() *HealthResponseBody { - r := &HealthResponseBody{ - Indices: make(map[string]IndexHealthStats, 0), - } - - return r -} diff --git a/typedapi/types/healthstatistics.go b/typedapi/types/healthstatistics.go index 9d6d42bd3e..7cd887a1d5 100644 --- a/typedapi/types/healthstatistics.go +++ b/typedapi/types/healthstatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // HealthStatistics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L153-L155 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L150-L152 type HealthStatistics struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -97,3 +97,5 @@ func NewHealthStatistics() *HealthStatistics { return r } + +// false diff --git a/typedapi/types/highlight.go b/typedapi/types/highlight.go index 0a745970a4..090334a226 100644 --- a/typedapi/types/highlight.go +++ b/typedapi/types/highlight.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -38,7 +38,7 @@ import ( // Highlight type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/highlighting.ts#L153-L156 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/highlighting.ts#L152-L155 type Highlight struct { // BoundaryChars A string that contains each boundary character. BoundaryChars *string `json:"boundary_chars,omitempty"` @@ -378,9 +378,19 @@ func (s *Highlight) UnmarshalJSON(data []byte) error { // NewHighlight returns a Highlight. 
func NewHighlight() *Highlight { r := &Highlight{ - Fields: make(map[string]HighlightField, 0), - Options: make(map[string]json.RawMessage, 0), + Fields: make(map[string]HighlightField), + Options: make(map[string]json.RawMessage), } return r } + +// true + +type HighlightVariant interface { + HighlightCaster() *Highlight +} + +func (s *Highlight) HighlightCaster() *Highlight { + return s +} diff --git a/typedapi/types/highlightfield.go b/typedapi/types/highlightfield.go index bcfe06592d..55e775fb81 100644 --- a/typedapi/types/highlightfield.go +++ b/typedapi/types/highlightfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -37,9 +37,8 @@ import ( // HighlightField type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/highlighting.ts#L193-L197 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/highlighting.ts#L192-L195 type HighlightField struct { - Analyzer Analyzer `json:"analyzer,omitempty"` // BoundaryChars A string that contains each boundary character. BoundaryChars *string `json:"boundary_chars,omitempty"` // BoundaryMaxScan How far to scan for boundary characters. 
@@ -131,110 +130,6 @@ func (s *HighlightField) UnmarshalJSON(data []byte) error { switch t { - case "analyzer": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - kind := make(map[string]string, 0) - localDec := json.NewDecoder(source) - localDec.Decode(&kind) - source.Seek(0, io.SeekStart) - if _, ok := kind["type"]; !ok { - kind["type"] = "custom" - } - switch kind["type"] { - - case "custom": - o := NewCustomAnalyzer() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - case "fingerprint": - o := NewFingerprintAnalyzer() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - case "keyword": - o := NewKeywordAnalyzer() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - case "language": - o := NewLanguageAnalyzer() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - case "nori": - o := NewNoriAnalyzer() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - case "pattern": - o := NewPatternAnalyzer() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - case "simple": - o := NewSimpleAnalyzer() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - case "standard": - o := NewStandardAnalyzer() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - case "stop": - o := NewStopAnalyzer() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - case "whitespace": - o := NewWhitespaceAnalyzer() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - case "icu_analyzer": - o := NewIcuAnalyzer() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - case "kuromoji": - o := NewKuromojiAnalyzer() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - case "snowball": - o := NewSnowballAnalyzer() 
- if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - case "dutch": - o := NewDutchAnalyzer() - if err := localDec.Decode(&o); err != nil { - return err - } - s.Analyzer = *o - default: - if err := localDec.Decode(&s.Analyzer); err != nil { - return err - } - } - case "boundary_chars": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -501,8 +396,18 @@ func (s *HighlightField) UnmarshalJSON(data []byte) error { // NewHighlightField returns a HighlightField. func NewHighlightField() *HighlightField { r := &HighlightField{ - Options: make(map[string]json.RawMessage, 0), + Options: make(map[string]json.RawMessage), } return r } + +// true + +type HighlightFieldVariant interface { + HighlightFieldCaster() *HighlightField +} + +func (s *HighlightField) HighlightFieldCaster() *HighlightField { + return s +} diff --git a/typedapi/types/hindianalyzer.go b/typedapi/types/hindianalyzer.go new file mode 100644 index 0000000000..3123d8f183 --- /dev/null +++ b/typedapi/types/hindianalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HindiAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L179-L184 +type HindiAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *HindiAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HindiAnalyzer) MarshalJSON() ([]byte, error) { 
+ type innerHindiAnalyzer HindiAnalyzer + tmp := innerHindiAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "hindi" + + return json.Marshal(tmp) +} + +// NewHindiAnalyzer returns a HindiAnalyzer. +func NewHindiAnalyzer() *HindiAnalyzer { + r := &HindiAnalyzer{} + + return r +} + +// true + +type HindiAnalyzerVariant interface { + HindiAnalyzerCaster() *HindiAnalyzer +} + +func (s *HindiAnalyzer) HindiAnalyzerCaster() *HindiAnalyzer { + return s +} diff --git a/typedapi/types/hint.go b/typedapi/types/hint.go index 50edbb7393..ea9c83427c 100644 --- a/typedapi/types/hint.go +++ b/typedapi/types/hint.go @@ -16,27 +16,37 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Hint type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/suggest_user_profiles/types.ts#L23-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/suggest_user_profiles/types.ts#L23-L34 type Hint struct { // Labels A single key-value pair to match against the labels section // of a profile. A profile is considered matching if it matches // at least one of the strings. Labels map[string][]string `json:"labels,omitempty"` - // Uids A list of Profile UIDs to match against. + // Uids A list of profile UIDs to match against. Uids []string `json:"uids,omitempty"` } // NewHint returns a Hint. 
func NewHint() *Hint { r := &Hint{ - Labels: make(map[string][]string, 0), + Labels: make(map[string][]string), } return r } + +// true + +type HintVariant interface { + HintCaster() *Hint +} + +func (s *Hint) HintCaster() *Hint { + return s +} diff --git a/typedapi/types/histogramaggregate.go b/typedapi/types/histogramaggregate.go index 25e06b4c84..5e0bb27883 100644 --- a/typedapi/types/histogramaggregate.go +++ b/typedapi/types/histogramaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // HistogramAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L342-L343 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L372-L376 type HistogramAggregate struct { Buckets BucketsHistogramBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewHistogramAggregate() *HistogramAggregate { return r } + +// false diff --git a/typedapi/types/histogramaggregation.go b/typedapi/types/histogramaggregation.go index 4203b18f2f..e422ff2993 100644 --- a/typedapi/types/histogramaggregation.go +++ b/typedapi/types/histogramaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // HistogramAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L502-L548 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L519-L565 type HistogramAggregation struct { // ExtendedBounds Enables extending the bounds of the histogram beyond the data itself. ExtendedBounds *ExtendedBoundsdouble `json:"extended_bounds,omitempty"` @@ -223,3 +223,13 @@ func NewHistogramAggregation() *HistogramAggregation { return r } + +// true + +type HistogramAggregationVariant interface { + HistogramAggregationCaster() *HistogramAggregation +} + +func (s *HistogramAggregation) HistogramAggregationCaster() *HistogramAggregation { + return s +} diff --git a/typedapi/types/histogrambucket.go b/typedapi/types/histogrambucket.go index 66de69940f..befe6a66b3 100644 --- a/typedapi/types/histogrambucket.go +++ b/typedapi/types/histogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // HistogramBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L345-L348 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L378-L381 type HistogramBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -515,6 +515,13 @@ func (s *HistogramBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -648,8 +655,10 @@ func (s HistogramBucket) MarshalJSON() ([]byte, error) { // NewHistogramBucket returns a HistogramBucket. func NewHistogramBucket() *HistogramBucket { r := &HistogramBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/histogramgrouping.go b/typedapi/types/histogramgrouping.go index 241683a99e..de6e229c63 100644 --- a/typedapi/types/histogramgrouping.go +++ b/typedapi/types/histogramgrouping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // HistogramGrouping type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/_types/Groupings.ts#L84-L97 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/_types/Groupings.ts#L84-L97 type HistogramGrouping struct { // Fields The set of fields that you wish to build histograms for. // All fields specified must be some kind of numeric. @@ -102,3 +102,13 @@ func NewHistogramGrouping() *HistogramGrouping { return r } + +// true + +type HistogramGroupingVariant interface { + HistogramGroupingCaster() *HistogramGrouping +} + +func (s *HistogramGrouping) HistogramGroupingCaster() *HistogramGrouping { + return s +} diff --git a/typedapi/types/histogramproperty.go b/typedapi/types/histogramproperty.go index f47501ee32..2539c0e6f0 100644 --- a/typedapi/types/histogramproperty.go +++ b/typedapi/types/histogramproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,20 +29,22 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // HistogramProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/specialized.ts#L60-L63 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/specialized.ts#L69-L72 type HistogramProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *HistogramProperty) UnmarshalJSON(data []byte) error { @@ -84,301 +86,313 @@ func (s *HistogramProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields 
| %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -441,306 +455,323 @@ func (s *HistogramProperty) UnmarshalJSON(data []byte) error { case 
"binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | 
%w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -755,13 +786,14 @@ func (s *HistogramProperty) UnmarshalJSON(data []byte) error { func (s HistogramProperty) MarshalJSON() ([]byte, error) { type innerHistogramProperty HistogramProperty tmp := innerHistogramProperty{ - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - Meta: s.Meta, - Properties: s.Properties, - Type: s.Type, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = 
"histogram" @@ -772,10 +804,20 @@ func (s HistogramProperty) MarshalJSON() ([]byte, error) { // NewHistogramProperty returns a HistogramProperty. func NewHistogramProperty() *HistogramProperty { r := &HistogramProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type HistogramPropertyVariant interface { + HistogramPropertyCaster() *HistogramProperty +} + +func (s *HistogramProperty) HistogramPropertyCaster() *HistogramProperty { + return s +} diff --git a/typedapi/types/hit.go b/typedapi/types/hit.go index 0fb7cb9577..05183da2f2 100644 --- a/typedapi/types/hit.go +++ b/typedapi/types/hit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,13 +31,13 @@ import ( // Hit type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/hits.ts#L40-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/hits.ts#L41-L66 type Hit struct { Explanation_ *Explanation `json:"_explanation,omitempty"` Fields map[string]json.RawMessage `json:"fields,omitempty"` Highlight map[string][]string `json:"highlight,omitempty"` Id_ *string `json:"_id,omitempty"` - IgnoredFieldValues map[string][]string `json:"ignored_field_values,omitempty"` + IgnoredFieldValues map[string][]FieldValue `json:"ignored_field_values,omitempty"` Ignored_ []string `json:"_ignored,omitempty"` Index_ string `json:"_index"` InnerHits map[string]InnerHitsResult `json:"inner_hits,omitempty"` @@ -98,7 +98,7 @@ func (s *Hit) UnmarshalJSON(data []byte) error { case "ignored_field_values": if s.IgnoredFieldValues == nil { - s.IgnoredFieldValues = make(map[string][]string, 0) + s.IgnoredFieldValues = make(map[string][]FieldValue, 0) } if err := dec.Decode(&s.IgnoredFieldValues); err != nil { return fmt.Errorf("%s | %w", "IgnoredFieldValues", err) @@ -130,7 +130,7 @@ func (s *Hit) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) switch rawMsg[0] { case '{': - o := make(map[string][]Float64, 0) + o := make(map[string]Float64, 0) if err := localDec.Decode(&o); err != nil { return fmt.Errorf("%s | %w", "MatchedQueries", err) } @@ -248,11 +248,13 @@ func (s *Hit) UnmarshalJSON(data []byte) error { // NewHit returns a Hit. 
func NewHit() *Hit { r := &Hit{ - Fields: make(map[string]json.RawMessage, 0), - Highlight: make(map[string][]string, 0), - IgnoredFieldValues: make(map[string][]string, 0), - InnerHits: make(map[string]InnerHitsResult, 0), + Fields: make(map[string]json.RawMessage), + Highlight: make(map[string][]string), + IgnoredFieldValues: make(map[string][]FieldValue), + InnerHits: make(map[string]InnerHitsResult), } return r } + +// false diff --git a/typedapi/types/hitsevent.go b/typedapi/types/hitsevent.go index 24ba793f53..f44e1c9bf6 100644 --- a/typedapi/types/hitsevent.go +++ b/typedapi/types/hitsevent.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // HitsEvent type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/eql/_types/EqlHits.ts#L41-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/eql/_types/EqlHits.ts#L41-L54 type HitsEvent struct { Fields map[string][]json.RawMessage `json:"fields,omitempty"` // Id_ Unique identifier for the event. This ID is only unique within the index. @@ -105,8 +105,10 @@ func (s *HitsEvent) UnmarshalJSON(data []byte) error { // NewHitsEvent returns a HitsEvent. func NewHitsEvent() *HitsEvent { r := &HitsEvent{ - Fields: make(map[string][]json.RawMessage, 0), + Fields: make(map[string][]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/hitsmetadata.go b/typedapi/types/hitsmetadata.go index aafdfd9afc..b635709449 100644 --- a/typedapi/types/hitsmetadata.go +++ b/typedapi/types/hitsmetadata.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // HitsMetadata type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/hits.ts#L67-L73 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/hits.ts#L68-L74 type HitsMetadata struct { Hits []Hit `json:"hits"` MaxScore *Float64 `json:"max_score,omitempty"` @@ -80,3 +80,5 @@ func NewHitsMetadata() *HitsMetadata { return r } + +// false diff --git a/typedapi/types/hitssequence.go b/typedapi/types/hitssequence.go index 7ae87e0ecf..94592a361d 100644 --- a/typedapi/types/hitssequence.go +++ b/typedapi/types/hitssequence.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // HitsSequence type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/eql/_types/EqlHits.ts#L56-L64 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/eql/_types/EqlHits.ts#L56-L64 type HitsSequence struct { // Events Contains events matching the query. Each object represents a matching event. 
Events []HitsEvent `json:"events"` @@ -41,3 +41,5 @@ func NewHitsSequence() *HitsSequence { return r } + +// false diff --git a/typedapi/types/holtlinearmodelsettings.go b/typedapi/types/holtlinearmodelsettings.go index 465097e3eb..19f2b99778 100644 --- a/typedapi/types/holtlinearmodelsettings.go +++ b/typedapi/types/holtlinearmodelsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // HoltLinearModelSettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L271-L274 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L297-L300 type HoltLinearModelSettings struct { Alpha *float32 `json:"alpha,omitempty"` Beta *float32 `json:"beta,omitempty"` @@ -95,3 +95,13 @@ func NewHoltLinearModelSettings() *HoltLinearModelSettings { return r } + +// true + +type HoltLinearModelSettingsVariant interface { + HoltLinearModelSettingsCaster() *HoltLinearModelSettings +} + +func (s *HoltLinearModelSettings) HoltLinearModelSettingsCaster() *HoltLinearModelSettings { + return s +} diff --git a/typedapi/types/holtmovingaverageaggregation.go b/typedapi/types/holtmovingaverageaggregation.go index 06ecb705c8..1c64a9ea0d 100644 --- a/typedapi/types/holtmovingaverageaggregation.go +++ b/typedapi/types/holtmovingaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // HoltMovingAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L257-L260 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L283-L286 type HoltMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -173,3 +173,13 @@ func NewHoltMovingAverageAggregation() *HoltMovingAverageAggregation { return r } + +// true + +type HoltMovingAverageAggregationVariant interface { + HoltMovingAverageAggregationCaster() *HoltMovingAverageAggregation +} + +func (s *HoltMovingAverageAggregation) HoltMovingAverageAggregationCaster() *HoltMovingAverageAggregation { + return s +} diff --git a/typedapi/types/holtwintersmodelsettings.go b/typedapi/types/holtwintersmodelsettings.go index 01f4388663..34c435c680 100644 --- a/typedapi/types/holtwintersmodelsettings.go +++ b/typedapi/types/holtwintersmodelsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // HoltWintersModelSettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L275-L282 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L301-L308 type HoltWintersModelSettings struct { Alpha *float32 `json:"alpha,omitempty"` Beta *float32 `json:"beta,omitempty"` @@ -152,3 +152,13 @@ func NewHoltWintersModelSettings() *HoltWintersModelSettings { return r } + +// true + +type HoltWintersModelSettingsVariant interface { + HoltWintersModelSettingsCaster() *HoltWintersModelSettings +} + +func (s *HoltWintersModelSettings) HoltWintersModelSettingsCaster() *HoltWintersModelSettings { + return s +} diff --git a/typedapi/types/holtwintersmovingaverageaggregation.go b/typedapi/types/holtwintersmovingaverageaggregation.go index ac62d11734..84ed128149 100644 --- a/typedapi/types/holtwintersmovingaverageaggregation.go +++ b/typedapi/types/holtwintersmovingaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // HoltWintersMovingAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L262-L265 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L288-L291 type HoltWintersMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -173,3 +173,13 @@ func NewHoltWintersMovingAverageAggregation() *HoltWintersMovingAverageAggregati return r } + +// true + +type HoltWintersMovingAverageAggregationVariant interface { + HoltWintersMovingAverageAggregationCaster() *HoltWintersMovingAverageAggregation +} + +func (s *HoltWintersMovingAverageAggregation) HoltWintersMovingAverageAggregationCaster() *HoltWintersMovingAverageAggregation { + return s +} diff --git a/typedapi/types/hop.go b/typedapi/types/hop.go index 0ab9babc51..5706380ccc 100644 --- a/typedapi/types/hop.go +++ b/typedapi/types/hop.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Hop type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/graph/_types/Hop.ts#L23-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/graph/_types/Hop.ts#L23-L36 type Hop struct { // Connections Specifies one or more fields from which you want to extract terms that are // associated with the specified vertices. @@ -40,3 +40,13 @@ func NewHop() *Hop { return r } + +// true + +type HopVariant interface { + HopCaster() *Hop +} + +func (s *Hop) HopCaster() *Hop { + return s +} diff --git a/typedapi/types/hourandminute.go b/typedapi/types/hourandminute.go index 19511773c1..50dfe962e6 100644 --- a/typedapi/types/hourandminute.go +++ b/typedapi/types/hourandminute.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // HourAndMinute type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Schedule.ts#L105-L108 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Schedule.ts#L106-L109 type HourAndMinute struct { Hour []int `json:"hour"` Minute []int `json:"minute"` @@ -34,3 +34,13 @@ func NewHourAndMinute() *HourAndMinute { return r } + +// true + +type HourAndMinuteVariant interface { + HourAndMinuteCaster() *HourAndMinute +} + +func (s *HourAndMinute) HourAndMinuteCaster() *HourAndMinute { + return s +} diff --git a/typedapi/types/hourlyschedule.go b/typedapi/types/hourlyschedule.go index 442d11a276..9cbe04a47a 100644 --- a/typedapi/types/hourlyschedule.go +++ b/typedapi/types/hourlyschedule.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // HourlySchedule type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Schedule.ts#L47-L49 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Schedule.ts#L47-L49 type HourlySchedule struct { Minute []int `json:"minute"` } @@ -33,3 +33,13 @@ func NewHourlySchedule() *HourlySchedule { return r } + +// true + +type HourlyScheduleVariant interface { + HourlyScheduleCaster() *HourlySchedule +} + +func (s *HourlySchedule) HourlyScheduleCaster() *HourlySchedule { + return s +} diff --git a/typedapi/types/htmlstripcharfilter.go b/typedapi/types/htmlstripcharfilter.go index 7b59c84978..8fda015e8b 100644 --- a/typedapi/types/htmlstripcharfilter.go +++ b/typedapi/types/htmlstripcharfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // HtmlStripCharFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/char_filters.ts#L43-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/char_filters.ts#L46-L49 type HtmlStripCharFilter struct { EscapedTags []string `json:"escaped_tags,omitempty"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewHtmlStripCharFilter() *HtmlStripCharFilter { return r } + +// true + +type HtmlStripCharFilterVariant interface { + HtmlStripCharFilterCaster() *HtmlStripCharFilter +} + +func (s *HtmlStripCharFilter) HtmlStripCharFilterCaster() *HtmlStripCharFilter { + return s +} diff --git a/typedapi/types/htmlstripprocessor.go b/typedapi/types/htmlstripprocessor.go new file mode 100644 index 0000000000..765d6cecf0 --- /dev/null +++ b/typedapi/types/htmlstripprocessor.go @@ -0,0 +1,172 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HtmlStripProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1009-L1025 +type HtmlStripProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The string-valued field to remove HTML tags from. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document, + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the converted value to + // By default, the `field` is updated in-place. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *HtmlStripProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil 
+} + +// NewHtmlStripProcessor returns a HtmlStripProcessor. +func NewHtmlStripProcessor() *HtmlStripProcessor { + r := &HtmlStripProcessor{} + + return r +} + +// true + +type HtmlStripProcessorVariant interface { + HtmlStripProcessorCaster() *HtmlStripProcessor +} + +func (s *HtmlStripProcessor) HtmlStripProcessorCaster() *HtmlStripProcessor { + return s +} diff --git a/typedapi/types/http.go b/typedapi/types/http.go index 2f4a31936c..7d8af4e444 100644 --- a/typedapi/types/http.go +++ b/typedapi/types/http.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Http type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L633-L647 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L669-L688 type Http struct { // Clients Information on current and recently-closed HTTP client connections. // Clients that have been closed longer than the @@ -40,6 +40,8 @@ type Http struct { Clients []Client `json:"clients,omitempty"` // CurrentOpen Current number of open HTTP connections for the node. CurrentOpen *int `json:"current_open,omitempty"` + // Routes Detailed HTTP stats broken down by route + Routes map[string]HttpRoute `json:"routes"` // TotalOpened Total number of HTTP connections opened for the node. 
TotalOpened *int64 `json:"total_opened,omitempty"` } @@ -80,6 +82,14 @@ func (s *Http) UnmarshalJSON(data []byte) error { s.CurrentOpen = &f } + case "routes": + if s.Routes == nil { + s.Routes = make(map[string]HttpRoute, 0) + } + if err := dec.Decode(&s.Routes); err != nil { + return fmt.Errorf("%s | %w", "Routes", err) + } + case "total_opened": var tmp any dec.Decode(&tmp) @@ -102,7 +112,11 @@ func (s *Http) UnmarshalJSON(data []byte) error { // NewHttp returns a Http. func NewHttp() *Http { - r := &Http{} + r := &Http{ + Routes: make(map[string]HttpRoute), + } return r } + +// false diff --git a/typedapi/types/httpemailattachment.go b/typedapi/types/httpemailattachment.go index a5231a8997..d6c1a737c5 100644 --- a/typedapi/types/httpemailattachment.go +++ b/typedapi/types/httpemailattachment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // HttpEmailAttachment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L218-L222 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L218-L222 type HttpEmailAttachment struct { ContentType *string `json:"content_type,omitempty"` Inline *bool `json:"inline,omitempty"` @@ -95,3 +95,13 @@ func NewHttpEmailAttachment() *HttpEmailAttachment { return r } + +// true + +type HttpEmailAttachmentVariant interface { + HttpEmailAttachmentCaster() *HttpEmailAttachment +} + +func (s *HttpEmailAttachment) HttpEmailAttachmentCaster() *HttpEmailAttachment { + return s +} diff --git a/typedapi/types/httpheaders.go b/typedapi/types/httpheaders.go index 617e121f08..00abda8f93 100644 --- a/typedapi/types/httpheaders.go +++ b/typedapi/types/httpheaders.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // HttpHeaders type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L158-L158 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L162-L162 type HttpHeaders map[string][]string + +type HttpHeadersVariant interface { + HttpHeadersCaster() *HttpHeaders +} diff --git a/typedapi/types/httpinput.go b/typedapi/types/httpinput.go index 80021c81a8..c103cf86cc 100644 --- a/typedapi/types/httpinput.go +++ b/typedapi/types/httpinput.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // HttpInput type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L44-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L44-L48 type HttpInput struct { Extract []string `json:"extract,omitempty"` Request *HttpInputRequestDefinition `json:"request,omitempty"` @@ -39,3 +39,13 @@ func NewHttpInput() *HttpInput { return r } + +// true + +type HttpInputVariant interface { + HttpInputCaster() *HttpInput +} + +func (s *HttpInput) HttpInputCaster() *HttpInput { + return s +} diff --git a/typedapi/types/httpinputauthentication.go b/typedapi/types/httpinputauthentication.go index 3d07115298..bb37cebb15 100644 --- a/typedapi/types/httpinputauthentication.go +++ b/typedapi/types/httpinputauthentication.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // HttpInputAuthentication type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L50-L52 type HttpInputAuthentication struct { Basic HttpInputBasicAuthentication `json:"basic"` } @@ -33,3 +33,13 @@ func NewHttpInputAuthentication() *HttpInputAuthentication { return r } + +// true + +type HttpInputAuthenticationVariant interface { + HttpInputAuthenticationCaster() *HttpInputAuthentication +} + +func (s *HttpInputAuthentication) HttpInputAuthenticationCaster() *HttpInputAuthentication { + return s +} diff --git a/typedapi/types/httpinputbasicauthentication.go b/typedapi/types/httpinputbasicauthentication.go index 83580dfd0b..9d25f8cb79 100644 --- a/typedapi/types/httpinputbasicauthentication.go +++ b/typedapi/types/httpinputbasicauthentication.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // HttpInputBasicAuthentication type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L54-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L54-L57 type HttpInputBasicAuthentication struct { Password string `json:"password"` Username string `json:"username"` @@ -72,3 +72,13 @@ func NewHttpInputBasicAuthentication() *HttpInputBasicAuthentication { return r } + +// true + +type HttpInputBasicAuthenticationVariant interface { + HttpInputBasicAuthenticationCaster() *HttpInputBasicAuthentication +} + +func (s *HttpInputBasicAuthentication) HttpInputBasicAuthenticationCaster() *HttpInputBasicAuthentication { + return s +} diff --git a/typedapi/types/httpinputproxy.go b/typedapi/types/httpinputproxy.go index 35421a2689..f13686f9d4 100644 --- a/typedapi/types/httpinputproxy.go +++ b/typedapi/types/httpinputproxy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // HttpInputProxy type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L67-L70 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L67-L70 type HttpInputProxy struct { Host string `json:"host"` Port uint `json:"port"` @@ -72,3 +72,13 @@ func NewHttpInputProxy() *HttpInputProxy { return r } + +// true + +type HttpInputProxyVariant interface { + HttpInputProxyCaster() *HttpInputProxy +} + +func (s *HttpInputProxy) HttpInputProxyCaster() *HttpInputProxy { + return s +} diff --git a/typedapi/types/httpinputrequestdefinition.go b/typedapi/types/httpinputrequestdefinition.go index db24e02f22..511a2d98a3 100644 --- a/typedapi/types/httpinputrequestdefinition.go +++ b/typedapi/types/httpinputrequestdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // HttpInputRequestDefinition type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L72-L86 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L72-L86 type HttpInputRequestDefinition struct { Auth *HttpInputAuthentication `json:"auth,omitempty"` Body *string `json:"body,omitempty"` @@ -166,9 +166,19 @@ func (s *HttpInputRequestDefinition) UnmarshalJSON(data []byte) error { // NewHttpInputRequestDefinition returns a HttpInputRequestDefinition. 
func NewHttpInputRequestDefinition() *HttpInputRequestDefinition { r := &HttpInputRequestDefinition{ - Headers: make(map[string]string, 0), - Params: make(map[string]string, 0), + Headers: make(map[string]string), + Params: make(map[string]string), } return r } + +// true + +type HttpInputRequestDefinitionVariant interface { + HttpInputRequestDefinitionCaster() *HttpInputRequestDefinition +} + +func (s *HttpInputRequestDefinition) HttpInputRequestDefinitionCaster() *HttpInputRequestDefinition { + return s +} diff --git a/typedapi/types/httpinputrequestresult.go b/typedapi/types/httpinputrequestresult.go index 5aa57f544c..dc6983e7d7 100644 --- a/typedapi/types/httpinputrequestresult.go +++ b/typedapi/types/httpinputrequestresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // HttpInputRequestResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L300-L300 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L300-L300 type HttpInputRequestResult struct { Auth *HttpInputAuthentication `json:"auth,omitempty"` Body *string `json:"body,omitempty"` @@ -166,9 +166,11 @@ func (s *HttpInputRequestResult) UnmarshalJSON(data []byte) error { // NewHttpInputRequestResult returns a HttpInputRequestResult. 
func NewHttpInputRequestResult() *HttpInputRequestResult { r := &HttpInputRequestResult{ - Headers: make(map[string]string, 0), - Params: make(map[string]string, 0), + Headers: make(map[string]string), + Params: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/httpinputresponseresult.go b/typedapi/types/httpinputresponseresult.go index 4cdacc1f06..b5d80dd12f 100644 --- a/typedapi/types/httpinputresponseresult.go +++ b/typedapi/types/httpinputresponseresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // HttpInputResponseResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L302-L306 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L302-L306 type HttpInputResponseResult struct { Body string `json:"body"` Headers HttpHeaders `json:"headers"` @@ -97,3 +97,5 @@ func NewHttpInputResponseResult() *HttpInputResponseResult { return r } + +// false diff --git a/typedapi/types/httproute.go b/typedapi/types/httproute.go new file mode 100644 index 0000000000..85262bf7b1 --- /dev/null +++ b/typedapi/types/httproute.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +// HttpRoute type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L690-L693 +type HttpRoute struct { + Requests HttpRouteRequests `json:"requests"` + Responses HttpRouteResponses `json:"responses"` +} + +// NewHttpRoute returns a HttpRoute. +func NewHttpRoute() *HttpRoute { + r := &HttpRoute{} + + return r +} + +// false diff --git a/typedapi/types/httprouterequests.go b/typedapi/types/httprouterequests.go new file mode 100644 index 0000000000..f8e6dbf904 --- /dev/null +++ b/typedapi/types/httprouterequests.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HttpRouteRequests type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L695-L699 +type HttpRouteRequests struct { + Count int64 `json:"count"` + SizeHistogram []SizeHttpHistogram `json:"size_histogram"` + TotalSizeInBytes int64 `json:"total_size_in_bytes"` +} + +func (s *HttpRouteRequests) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "size_histogram": + if err := dec.Decode(&s.SizeHistogram); err != nil { + return fmt.Errorf("%s | %w", "SizeHistogram", err) + } + + case "total_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeInBytes", err) + } + s.TotalSizeInBytes = value + case float64: + f := int64(v) + s.TotalSizeInBytes = f + } + + } + } + return nil +} + +// NewHttpRouteRequests returns a HttpRouteRequests. 
+func NewHttpRouteRequests() *HttpRouteRequests { + r := &HttpRouteRequests{} + + return r +} + +// false diff --git a/typedapi/types/httprouteresponses.go b/typedapi/types/httprouteresponses.go new file mode 100644 index 0000000000..a790bd1769 --- /dev/null +++ b/typedapi/types/httprouteresponses.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HttpRouteResponses type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L701-L706 +type HttpRouteResponses struct { + Count int64 `json:"count"` + HandlingTimeHistogram []TimeHttpHistogram `json:"handling_time_histogram"` + SizeHistogram []SizeHttpHistogram `json:"size_histogram"` + TotalSizeInBytes int64 `json:"total_size_in_bytes"` +} + +func (s *HttpRouteResponses) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "handling_time_histogram": + if err := dec.Decode(&s.HandlingTimeHistogram); err != nil { + return fmt.Errorf("%s | %w", "HandlingTimeHistogram", err) + } + + case "size_histogram": + if err := dec.Decode(&s.SizeHistogram); err != nil { + return fmt.Errorf("%s | %w", "SizeHistogram", err) + } + + case "total_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeInBytes", err) + } + s.TotalSizeInBytes = value + case float64: + f := int64(v) + s.TotalSizeInBytes = f + } + + } + } + return nil +} + +// NewHttpRouteResponses returns a HttpRouteResponses. +func NewHttpRouteResponses() *HttpRouteResponses { + r := &HttpRouteResponses{} + + return r +} + +// false diff --git a/typedapi/types/hungariananalyzer.go b/typedapi/types/hungariananalyzer.go new file mode 100644 index 0000000000..6b9a5bbdd5 --- /dev/null +++ b/typedapi/types/hungariananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HungarianAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L186-L191 +type HungarianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *HungarianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HungarianAnalyzer) MarshalJSON() ([]byte, error) { + type innerHungarianAnalyzer HungarianAnalyzer + tmp := innerHungarianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = 
"hungarian" + + return json.Marshal(tmp) +} + +// NewHungarianAnalyzer returns a HungarianAnalyzer. +func NewHungarianAnalyzer() *HungarianAnalyzer { + r := &HungarianAnalyzer{} + + return r +} + +// true + +type HungarianAnalyzerVariant interface { + HungarianAnalyzerCaster() *HungarianAnalyzer +} + +func (s *HungarianAnalyzer) HungarianAnalyzerCaster() *HungarianAnalyzer { + return s +} diff --git a/typedapi/types/hunspelltokenfilter.go b/typedapi/types/hunspelltokenfilter.go index 3de32e7b36..60e0b5a3da 100644 --- a/typedapi/types/hunspelltokenfilter.go +++ b/typedapi/types/hunspelltokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // HunspellTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L202-L208 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L201-L207 type HunspellTokenFilter struct { Dedup *bool `json:"dedup,omitempty"` Dictionary *string `json:"dictionary,omitempty"` @@ -146,3 +146,13 @@ func NewHunspellTokenFilter() *HunspellTokenFilter { return r } + +// true + +type HunspellTokenFilterVariant interface { + HunspellTokenFilterCaster() *HunspellTokenFilter +} + +func (s *HunspellTokenFilter) HunspellTokenFilterCaster() *HunspellTokenFilter { + return s +} diff --git a/typedapi/types/hyperparameter.go b/typedapi/types/hyperparameter.go index 0bce5f8dc9..cb64b0656e 100644 --- a/typedapi/types/hyperparameter.go +++ b/typedapi/types/hyperparameter.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Hyperparameter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L217-L231 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L273-L287 type Hyperparameter struct { // AbsoluteImportance A positive number showing how much the parameter influences the variation of // the loss function. For hyperparameters with values that are not specified by @@ -144,3 +144,5 @@ func NewHyperparameter() *Hyperparameter { return r } + +// false diff --git a/typedapi/types/hyperparameters.go b/typedapi/types/hyperparameters.go index 475e8e5112..c109830d89 100644 --- a/typedapi/types/hyperparameters.go +++ b/typedapi/types/hyperparameters.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Hyperparameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L419-L525 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L420-L526 type Hyperparameters struct { // Alpha Advanced configuration option. 
// Machine learning uses loss guided tree growing, which means that the decision @@ -382,3 +382,5 @@ func NewHyperparameters() *Hyperparameters { return r } + +// false diff --git a/typedapi/types/hyphenationdecompoundertokenfilter.go b/typedapi/types/hyphenationdecompoundertokenfilter.go index bf01f182d0..4f63211255 100644 --- a/typedapi/types/hyphenationdecompoundertokenfilter.go +++ b/typedapi/types/hyphenationdecompoundertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // HyphenationDecompounderTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L58-L60 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L57-L59 type HyphenationDecompounderTokenFilter struct { HyphenationPatternsPath *string `json:"hyphenation_patterns_path,omitempty"` MaxSubwordSize *int `json:"max_subword_size,omitempty"` @@ -191,3 +191,13 @@ func NewHyphenationDecompounderTokenFilter() *HyphenationDecompounderTokenFilter return r } + +// true + +type HyphenationDecompounderTokenFilterVariant interface { + HyphenationDecompounderTokenFilterCaster() *HyphenationDecompounderTokenFilter +} + +func (s *HyphenationDecompounderTokenFilter) HyphenationDecompounderTokenFilterCaster() *HyphenationDecompounderTokenFilter { + return s +} diff --git a/typedapi/types/icuanalyzer.go b/typedapi/types/icuanalyzer.go index 9110d62f42..279a4d7309 100644 --- a/typedapi/types/icuanalyzer.go +++ b/typedapi/types/icuanalyzer.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,7 +29,7 @@ import ( // IcuAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L67-L71 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L67-L71 type IcuAnalyzer struct { Method icunormalizationtype.IcuNormalizationType `json:"method"` Mode icunormalizationmode.IcuNormalizationMode `json:"mode"` @@ -56,3 +56,13 @@ func NewIcuAnalyzer() *IcuAnalyzer { return r } + +// true + +type IcuAnalyzerVariant interface { + IcuAnalyzerCaster() *IcuAnalyzer +} + +func (s *IcuAnalyzer) IcuAnalyzerCaster() *IcuAnalyzer { + return s +} diff --git a/typedapi/types/icucollationproperty.go b/typedapi/types/icucollationproperty.go index d56a76d508..47223b34d5 100644 --- a/typedapi/types/icucollationproperty.go +++ b/typedapi/types/icucollationproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,11 +34,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationdecomposition" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationstrength" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // IcuCollationProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/specialized.ts#L94-L118 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/specialized.ts#L103-L127 type IcuCollationProperty struct { Alternate *icucollationalternate.IcuCollationAlternate `json:"alternate,omitempty"` CaseFirst *icucollationcasefirst.IcuCollationCaseFirst `json:"case_first,omitempty"` @@ -60,16 +61,16 @@ type IcuCollationProperty struct { Norms *bool `json:"norms,omitempty"` // NullValue Accepts a string value which is substituted for any explicit null values. // Defaults to null, which means the field is treated as missing. - NullValue *string `json:"null_value,omitempty"` - Numeric *bool `json:"numeric,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Rules *string `json:"rules,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Strength *icucollationstrength.IcuCollationStrength `json:"strength,omitempty"` - Type string `json:"type,omitempty"` - VariableTop *string `json:"variable_top,omitempty"` - Variant *string `json:"variant,omitempty"` + NullValue *string `json:"null_value,omitempty"` + Numeric *bool `json:"numeric,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Rules *string `json:"rules,omitempty"` + Store *bool `json:"store,omitempty"` + Strength *icucollationstrength.IcuCollationStrength `json:"strength,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` + VariableTop *string `json:"variable_top,omitempty"` + Variant *string `json:"variant,omitempty"` } func (s *IcuCollationProperty) UnmarshalJSON(data []byte) error { @@ -182,301 +183,313 @@ func (s *IcuCollationProperty) UnmarshalJSON(data []byte) error { 
case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo 
case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -610,301 +623,313 @@ func (s *IcuCollationProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := 
NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { 
- return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -922,18 +947,6 @@ func (s *IcuCollationProperty) 
UnmarshalJSON(data []byte) error { } s.Rules = &o - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -953,6 +966,11 @@ func (s *IcuCollationProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Strength", err) } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -1011,9 +1029,9 @@ func (s IcuCollationProperty) MarshalJSON() ([]byte, error) { Numeric: s.Numeric, Properties: s.Properties, Rules: s.Rules, - Similarity: s.Similarity, Store: s.Store, Strength: s.Strength, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, VariableTop: s.VariableTop, Variant: s.Variant, @@ -1027,10 +1045,20 @@ func (s IcuCollationProperty) MarshalJSON() ([]byte, error) { // NewIcuCollationProperty returns a IcuCollationProperty. 
func NewIcuCollationProperty() *IcuCollationProperty { r := &IcuCollationProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type IcuCollationPropertyVariant interface { + IcuCollationPropertyCaster() *IcuCollationProperty +} + +func (s *IcuCollationProperty) IcuCollationPropertyCaster() *IcuCollationProperty { + return s +} diff --git a/typedapi/types/icucollationtokenfilter.go b/typedapi/types/icucollationtokenfilter.go index 54f0f91257..188696f3b5 100644 --- a/typedapi/types/icucollationtokenfilter.go +++ b/typedapi/types/icucollationtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -36,7 +36,7 @@ import ( // IcuCollationTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L51-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L51-L65 type IcuCollationTokenFilter struct { Alternate *icucollationalternate.IcuCollationAlternate `json:"alternate,omitempty"` CaseFirst *icucollationcasefirst.IcuCollationCaseFirst `json:"case_first,omitempty"` @@ -237,3 +237,13 @@ func NewIcuCollationTokenFilter() *IcuCollationTokenFilter { return r } + +// true + +type IcuCollationTokenFilterVariant interface { + IcuCollationTokenFilterCaster() *IcuCollationTokenFilter +} + +func (s *IcuCollationTokenFilter) IcuCollationTokenFilterCaster() *IcuCollationTokenFilter { + return s +} diff --git a/typedapi/types/icufoldingtokenfilter.go b/typedapi/types/icufoldingtokenfilter.go index 48a6224101..cdb0a415ad 100644 --- a/typedapi/types/icufoldingtokenfilter.go +++ b/typedapi/types/icufoldingtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IcuFoldingTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L46-L49 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L46-L49 type IcuFoldingTokenFilter struct { Type string `json:"type,omitempty"` UnicodeSetFilter string `json:"unicode_set_filter"` @@ -100,3 +100,13 @@ func NewIcuFoldingTokenFilter() *IcuFoldingTokenFilter { return r } + +// true + +type IcuFoldingTokenFilterVariant interface { + IcuFoldingTokenFilterCaster() *IcuFoldingTokenFilter +} + +func (s *IcuFoldingTokenFilter) IcuFoldingTokenFilterCaster() *IcuFoldingTokenFilter { + return s +} diff --git a/typedapi/types/icunormalizationcharfilter.go b/typedapi/types/icunormalizationcharfilter.go index cb3ebafde8..9650befa4e 100644 --- a/typedapi/types/icunormalizationcharfilter.go +++ b/typedapi/types/icunormalizationcharfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // IcuNormalizationCharFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L40-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L40-L44 type IcuNormalizationCharFilter struct { Mode *icunormalizationmode.IcuNormalizationMode `json:"mode,omitempty"` Name *icunormalizationtype.IcuNormalizationType `json:"name,omitempty"` @@ -102,3 +102,13 @@ func NewIcuNormalizationCharFilter() *IcuNormalizationCharFilter { return r } + +// true + +type IcuNormalizationCharFilterVariant interface { + IcuNormalizationCharFilterCaster() *IcuNormalizationCharFilter +} + +func (s *IcuNormalizationCharFilter) IcuNormalizationCharFilterCaster() *IcuNormalizationCharFilter { + return s +} diff --git a/typedapi/types/icunormalizationtokenfilter.go b/typedapi/types/icunormalizationtokenfilter.go index e5319eb7a0..05908d292a 100644 --- a/typedapi/types/icunormalizationtokenfilter.go +++ b/typedapi/types/icunormalizationtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // IcuNormalizationTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L35-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L35-L38 type IcuNormalizationTokenFilter struct { Name icunormalizationtype.IcuNormalizationType `json:"name"` Type string `json:"type,omitempty"` @@ -94,3 +94,13 @@ func NewIcuNormalizationTokenFilter() *IcuNormalizationTokenFilter { return r } + +// true + +type IcuNormalizationTokenFilterVariant interface { + IcuNormalizationTokenFilterCaster() *IcuNormalizationTokenFilter +} + +func (s *IcuNormalizationTokenFilter) IcuNormalizationTokenFilterCaster() *IcuNormalizationTokenFilter { + return s +} diff --git a/typedapi/types/icutokenizer.go b/typedapi/types/icutokenizer.go index 99e65ef73e..eab50ab021 100644 --- a/typedapi/types/icutokenizer.go +++ b/typedapi/types/icutokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IcuTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L30-L33 type IcuTokenizer struct { RuleFiles string `json:"rule_files"` Type string `json:"type,omitempty"` @@ -100,3 +100,13 @@ func NewIcuTokenizer() *IcuTokenizer { return r } + +// true + +type IcuTokenizerVariant interface { + IcuTokenizerCaster() *IcuTokenizer +} + +func (s *IcuTokenizer) IcuTokenizerCaster() *IcuTokenizer { + return s +} diff --git a/typedapi/types/icutransformtokenfilter.go b/typedapi/types/icutransformtokenfilter.go index 084de35307..9a9cdb4d67 100644 --- a/typedapi/types/icutransformtokenfilter.go +++ b/typedapi/types/icutransformtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // IcuTransformTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/icu-plugin.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/icu-plugin.ts#L24-L28 type IcuTransformTokenFilter struct { Dir *icutransformdirection.IcuTransformDirection `json:"dir,omitempty"` Id string `json:"id"` @@ -109,3 +109,13 @@ func NewIcuTransformTokenFilter() *IcuTransformTokenFilter { return r } + +// true + +type IcuTransformTokenFilterVariant interface { + IcuTransformTokenFilterCaster() *IcuTransformTokenFilter +} + +func (s *IcuTransformTokenFilter) IcuTransformTokenFilterCaster() *IcuTransformTokenFilter { + return s +} diff --git a/typedapi/types/ids.go b/typedapi/types/ids.go index d0c872bcc4..1a80607e8a 100644 --- a/typedapi/types/ids.go +++ b/typedapi/types/ids.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Ids type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L62-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L62-L62 type Ids []string + +type IdsVariant interface { + IdsCaster() *Ids +} diff --git a/typedapi/types/idsquery.go b/typedapi/types/idsquery.go index 1b5806919c..9f26789519 100644 --- a/typedapi/types/idsquery.go +++ b/typedapi/types/idsquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IdsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L81-L86 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L88-L96 type IdsQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -114,3 +114,13 @@ func NewIdsQuery() *IdsQuery { return r } + +// true + +type IdsQueryVariant interface { + IdsQueryCaster() *IdsQuery +} + +func (s *IdsQuery) IdsQueryCaster() *IdsQuery { + return s +} diff --git a/typedapi/types/ilm.go b/typedapi/types/ilm.go index 9e2eeae7e3..6beed45fe7 100644 --- a/typedapi/types/ilm.go +++ b/typedapi/types/ilm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Ilm type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L162-L165 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L172-L175 type Ilm struct { PolicyCount int `json:"policy_count"` PolicyStats []IlmPolicyStatistics `json:"policy_stats"` @@ -84,3 +84,5 @@ func NewIlm() *Ilm { return r } + +// false diff --git a/typedapi/types/ilmactions.go b/typedapi/types/ilmactions.go index 14a1148ba9..60aa7ce102 100644 --- a/typedapi/types/ilmactions.go +++ b/typedapi/types/ilmactions.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IlmActions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Phase.ts#L42-L96 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Phase.ts#L39-L93 type IlmActions struct { // Allocate Phases allowed: warm, cold. Allocate *AllocateAction `json:"allocate,omitempty"` @@ -58,3 +58,13 @@ func NewIlmActions() *IlmActions { return r } + +// true + +type IlmActionsVariant interface { + IlmActionsCaster() *IlmActions +} + +func (s *IlmActions) IlmActionsCaster() *IlmActions { + return s +} diff --git a/typedapi/types/ilmindicator.go b/typedapi/types/ilmindicator.go index 5fbebad1d6..248d3adb17 100644 --- a/typedapi/types/ilmindicator.go +++ b/typedapi/types/ilmindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // IlmIndicator type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L146-L150 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L163-L167 type IlmIndicator struct { Details *IlmIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewIlmIndicator() *IlmIndicator { return r } + +// false diff --git a/typedapi/types/ilmindicatordetails.go b/typedapi/types/ilmindicatordetails.go index 410a93e17e..f2272673e3 100644 --- a/typedapi/types/ilmindicatordetails.go +++ b/typedapi/types/ilmindicatordetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // IlmIndicatorDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L151-L155 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L168-L172 type IlmIndicatorDetails struct { IlmStatus lifecycleoperationmode.LifecycleOperationMode `json:"ilm_status"` Policies int64 `json:"policies"` @@ -102,3 +102,5 @@ func NewIlmIndicatorDetails() *IlmIndicatorDetails { return r } + +// false diff --git a/typedapi/types/ilmpolicy.go b/typedapi/types/ilmpolicy.go index 916eafb79c..c579709474 100644 --- a/typedapi/types/ilmpolicy.go +++ b/typedapi/types/ilmpolicy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,8 +30,10 @@ import ( // IlmPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Policy.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Policy.ts#L23-L29 type IlmPolicy struct { + // Meta_ Arbitrary metadata that is not automatically generated or used by + // Elasticsearch. 
Meta_ Metadata `json:"_meta,omitempty"` Phases Phases `json:"phases"` } @@ -72,3 +74,13 @@ func NewIlmPolicy() *IlmPolicy { return r } + +// true + +type IlmPolicyVariant interface { + IlmPolicyCaster() *IlmPolicy +} + +func (s *IlmPolicy) IlmPolicyCaster() *IlmPolicy { + return s +} diff --git a/typedapi/types/ilmpolicystatistics.go b/typedapi/types/ilmpolicystatistics.go index 26f2864d62..d8095f181f 100644 --- a/typedapi/types/ilmpolicystatistics.go +++ b/typedapi/types/ilmpolicystatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,10 +31,10 @@ import ( // IlmPolicyStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L157-L160 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L167-L170 type IlmPolicyStatistics struct { - IndicesManaged int `json:"indices_managed"` - Phases Phases `json:"phases"` + IndicesManaged int `json:"indices_managed"` + Phases UsagePhases `json:"phases"` } func (s *IlmPolicyStatistics) UnmarshalJSON(data []byte) error { @@ -84,3 +84,5 @@ func NewIlmPolicyStatistics() *IlmPolicyStatistics { return r } + +// false diff --git a/typedapi/types/impact.go b/typedapi/types/impact.go index c31f064884..5f0ed678d1 100644 --- a/typedapi/types/impact.go +++ b/typedapi/types/impact.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // Impact type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L65-L70 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L66-L71 type Impact struct { Description string `json:"description"` Id string `json:"id"` @@ -112,3 +112,5 @@ func NewImpact() *Impact { return r } + +// false diff --git a/typedapi/types/includedinvalidation.go b/typedapi/types/includedinvalidation.go index e84b65941c..3c49293cbf 100644 --- a/typedapi/types/includedinvalidation.go +++ b/typedapi/types/includedinvalidation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // IncludedInValidation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L73-L76 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L73-L76 type IncludedInValidation struct { Constraint []ScalarValue `json:"constraint"` Type string `json:"type,omitempty"` @@ -51,3 +51,13 @@ func NewIncludedInValidation() *IncludedInValidation { return r } + +// true + +type IncludedInValidationVariant interface { + IncludedInValidationCaster() *IncludedInValidation +} + +func (s *IncludedInValidation) IncludedInValidationCaster() *IncludedInValidation { + return s +} diff --git a/typedapi/types/indexaction.go b/typedapi/types/indexaction.go index 7e572619e2..5fd67753e0 100644 --- a/typedapi/types/indexaction.go +++ b/typedapi/types/indexaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // IndexAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L256-L265 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L256-L265 type IndexAction struct { DocId *string `json:"doc_id,omitempty"` ExecutionTimeField *string `json:"execution_time_field,omitempty"` @@ -99,3 +99,13 @@ func NewIndexAction() *IndexAction { return r } + +// true + +type IndexActionVariant interface { + IndexActionCaster() *IndexAction +} + +func (s *IndexAction) IndexActionCaster() *IndexAction { + return s +} diff --git a/typedapi/types/indexaliases.go b/typedapi/types/indexaliases.go index 40252526af..7ff00972ff 100644 --- a/typedapi/types/indexaliases.go +++ b/typedapi/types/indexaliases.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IndexAliases type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get_alias/IndicesGetAliasResponse.ts#L37-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_alias/IndicesGetAliasResponse.ts#L37-L39 type IndexAliases struct { Aliases map[string]AliasDefinition `json:"aliases"` } @@ -30,8 +30,10 @@ type IndexAliases struct { // NewIndexAliases returns a IndexAliases. 
func NewIndexAliases() *IndexAliases { r := &IndexAliases{ - Aliases: make(map[string]AliasDefinition, 0), + Aliases: make(map[string]AliasDefinition), } return r } + +// false diff --git a/typedapi/types/indexanddatastreamaction.go b/typedapi/types/indexanddatastreamaction.go index 389f8ceab4..b17311f80d 100644 --- a/typedapi/types/indexanddatastreamaction.go +++ b/typedapi/types/indexanddatastreamaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // IndexAndDataStreamAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/modify_data_stream/types.ts#L39-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/modify_data_stream/types.ts#L39-L44 type IndexAndDataStreamAction struct { // DataStream Data stream targeted by the action. DataStream string `json:"data_stream"` @@ -74,3 +74,13 @@ func NewIndexAndDataStreamAction() *IndexAndDataStreamAction { return r } + +// true + +type IndexAndDataStreamActionVariant interface { + IndexAndDataStreamActionCaster() *IndexAndDataStreamAction +} + +func (s *IndexAndDataStreamAction) IndexAndDataStreamActionCaster() *IndexAndDataStreamAction { + return s +} diff --git a/typedapi/types/indexcapabilities.go b/typedapi/types/indexcapabilities.go index 50b6a6db77..08a8b33936 100644 --- a/typedapi/types/indexcapabilities.go +++ b/typedapi/types/indexcapabilities.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IndexCapabilities type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_rollup_index_caps/types.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_rollup_index_caps/types.ts#L24-L26 type IndexCapabilities struct { RollupJobs []RollupJobSummary `json:"rollup_jobs"` } @@ -33,3 +33,5 @@ func NewIndexCapabilities() *IndexCapabilities { return r } + +// false diff --git a/typedapi/types/indexdetails.go b/typedapi/types/indexdetails.go index 911a50aa5e..a11dbad791 100644 --- a/typedapi/types/indexdetails.go +++ b/typedapi/types/indexdetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndexDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotIndexDetails.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotIndexDetails.ts#L23-L28 type IndexDetails struct { MaxSegmentsPerShard int64 `json:"max_segments_per_shard"` ShardCount int `json:"shard_count"` @@ -116,3 +116,5 @@ func NewIndexDetails() *IndexDetails { return r } + +// false diff --git a/typedapi/types/indexfield.go b/typedapi/types/indexfield.go index 51dff1f06d..d8d656145f 100644 --- a/typedapi/types/indexfield.go +++ b/typedapi/types/indexfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndexField type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/meta-fields.ts#L46-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/meta-fields.ts#L46-L48 type IndexField struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,13 @@ func NewIndexField() *IndexField { return r } + +// true + +type IndexFieldVariant interface { + IndexFieldCaster() *IndexField +} + +func (s *IndexField) IndexFieldCaster() *IndexField { + return s +} diff --git a/typedapi/types/indexhealthstats.go b/typedapi/types/indexhealthstats.go index 6ba0250a30..60dabf7665 100644 --- a/typedapi/types/indexhealthstats.go +++ b/typedapi/types/indexhealthstats.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,17 +33,18 @@ import ( // IndexHealthStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/health/types.ts#L24-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/health/types.ts#L24-L35 type IndexHealthStats struct { - ActivePrimaryShards int `json:"active_primary_shards"` - ActiveShards int `json:"active_shards"` - InitializingShards int `json:"initializing_shards"` - NumberOfReplicas int `json:"number_of_replicas"` - NumberOfShards int `json:"number_of_shards"` - RelocatingShards int `json:"relocating_shards"` - Shards map[string]ShardHealthStats `json:"shards,omitempty"` - Status healthstatus.HealthStatus `json:"status"` - UnassignedShards int `json:"unassigned_shards"` + ActivePrimaryShards int `json:"active_primary_shards"` + ActiveShards int `json:"active_shards"` + InitializingShards int `json:"initializing_shards"` + NumberOfReplicas int `json:"number_of_replicas"` + NumberOfShards int `json:"number_of_shards"` + RelocatingShards int `json:"relocating_shards"` + Shards map[string]ShardHealthStats `json:"shards,omitempty"` + Status healthstatus.HealthStatus `json:"status"` + UnassignedPrimaryShards int `json:"unassigned_primary_shards"` + UnassignedShards int `json:"unassigned_shards"` } func (s *IndexHealthStats) UnmarshalJSON(data []byte) error { @@ -170,6 +171,22 @@ func (s *IndexHealthStats) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Status", err) } + case "unassigned_primary_shards": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "UnassignedPrimaryShards", err) + } + s.UnassignedPrimaryShards = value + case float64: + f := int(v) + s.UnassignedPrimaryShards = f + } + case "unassigned_shards": var tmp any @@ -194,8 +211,10 @@ func (s *IndexHealthStats) UnmarshalJSON(data []byte) error { // NewIndexHealthStats returns a IndexHealthStats. func NewIndexHealthStats() *IndexHealthStats { r := &IndexHealthStats{ - Shards: make(map[string]ShardHealthStats, 0), + Shards: make(map[string]ShardHealthStats), } return r } + +// false diff --git a/typedapi/types/indexingpressurememorysummary.go b/typedapi/types/indexingpressurememorysummary.go index fac68493d2..dc78042b72 100644 --- a/typedapi/types/indexingpressurememorysummary.go +++ b/typedapi/types/indexingpressurememorysummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndexingPressureMemorySummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L580-L589 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L580-L589 type IndexingPressureMemorySummary struct { AllInBytes int64 `json:"all_in_bytes"` CombinedCoordinatingAndPrimaryInBytes int64 `json:"combined_coordinating_and_primary_in_bytes"` @@ -189,3 +189,5 @@ func NewIndexingPressureMemorySummary() *IndexingPressureMemorySummary { return r } + +// false diff --git a/typedapi/types/indexingslowlogsettings.go b/typedapi/types/indexingslowlogsettings.go index f7e582cfb3..e7414d147a 100644 --- a/typedapi/types/indexingslowlogsettings.go +++ b/typedapi/types/indexingslowlogsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndexingSlowlogSettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L563-L568 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L588-L593 type IndexingSlowlogSettings struct { Level *string `json:"level,omitempty"` Reformat *bool `json:"reformat,omitempty"` @@ -112,3 +112,13 @@ func NewIndexingSlowlogSettings() *IndexingSlowlogSettings { return r } + +// true + +type IndexingSlowlogSettingsVariant interface { + IndexingSlowlogSettingsCaster() *IndexingSlowlogSettings +} + +func (s *IndexingSlowlogSettings) IndexingSlowlogSettingsCaster() *IndexingSlowlogSettings { + return s +} diff --git a/typedapi/types/indexingslowlogtresholds.go b/typedapi/types/indexingslowlogtresholds.go index b368a39987..d1d188baaa 100644 --- a/typedapi/types/indexingslowlogtresholds.go +++ b/typedapi/types/indexingslowlogtresholds.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IndexingSlowlogTresholds type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L570-L577 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L595-L602 type IndexingSlowlogTresholds struct { // Index The indexing slow log, similar in functionality to the search slow log. The // log file name ends with `_index_indexing_slowlog.json`. 
@@ -36,3 +36,13 @@ func NewIndexingSlowlogTresholds() *IndexingSlowlogTresholds { return r } + +// true + +type IndexingSlowlogTresholdsVariant interface { + IndexingSlowlogTresholdsCaster() *IndexingSlowlogTresholds +} + +func (s *IndexingSlowlogTresholds) IndexingSlowlogTresholdsCaster() *IndexingSlowlogTresholds { + return s +} diff --git a/typedapi/types/indexingstats.go b/typedapi/types/indexingstats.go index a0ba1bcea9..90be31e2fc 100644 --- a/typedapi/types/indexingstats.go +++ b/typedapi/types/indexingstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndexingStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L143-L159 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L146-L162 type IndexingStats struct { DeleteCurrent int64 `json:"delete_current"` DeleteTime Duration `json:"delete_time,omitempty"` @@ -231,8 +231,10 @@ func (s *IndexingStats) UnmarshalJSON(data []byte) error { // NewIndexingStats returns a IndexingStats. func NewIndexingStats() *IndexingStats { r := &IndexingStats{ - Types: make(map[string]IndexingStats, 0), + Types: make(map[string]IndexingStats), } return r } + +// false diff --git a/typedapi/types/indexmappingrecord.go b/typedapi/types/indexmappingrecord.go index 0a402cc637..01b0344d7f 100644 --- a/typedapi/types/indexmappingrecord.go +++ b/typedapi/types/indexmappingrecord.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IndexMappingRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L29-L32 type IndexMappingRecord struct { Item *TypeMapping `json:"item,omitempty"` Mappings TypeMapping `json:"mappings"` @@ -34,3 +34,5 @@ func NewIndexMappingRecord() *IndexMappingRecord { return r } + +// false diff --git a/typedapi/types/indexoperation.go b/typedapi/types/indexoperation.go index 70694e361b..79737d98a8 100644 --- a/typedapi/types/indexoperation.go +++ b/typedapi/types/indexoperation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,30 +33,30 @@ import ( // IndexOperation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/bulk/types.ts#L132-L132 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/bulk/types.ts#L142-L142 type IndexOperation struct { // DynamicTemplates A map from the full name of fields to the name of dynamic templates. - // Defaults to an empty map. - // If a name matches a dynamic template, then that template will be applied + // It defaults to an empty map. 
+ // If a name matches a dynamic template, that template will be applied // regardless of other match predicates defined in the template. - // If a field is already defined in the mapping, then this parameter won’t be + // If a field is already defined in the mapping, then this parameter won't be // used. DynamicTemplates map[string]string `json:"dynamic_templates,omitempty"` // Id_ The document ID. Id_ *string `json:"_id,omitempty"` IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` IfSeqNo *int64 `json:"if_seq_no,omitempty"` - // Index_ Name of the index or index alias to perform the action on. + // Index_ The name of the index or index alias to perform the action on. Index_ *string `json:"_index,omitempty"` - // Pipeline ID of the pipeline to use to preprocess incoming documents. - // If the index has a default ingest pipeline specified, then setting the value - // to `_none` disables the default ingest pipeline for this request. - // If a final pipeline is configured it will always run, regardless of the value + // Pipeline The ID of the pipeline to use to preprocess incoming documents. + // If the index has a default ingest pipeline specified, setting the value to + // `_none` turns off the default ingest pipeline for this request. + // If a final pipeline is configured, it will always run regardless of the value // of this parameter. Pipeline *string `json:"pipeline,omitempty"` - // RequireAlias If `true`, the request’s actions must target an index alias. + // RequireAlias If `true`, the request's actions must target an index alias. RequireAlias *bool `json:"require_alias,omitempty"` - // Routing Custom value used to route operations to a specific shard. + // Routing A custom value used to route operations to a specific shard. 
Routing *string `json:"routing,omitempty"` Version *int64 `json:"version,omitempty"` VersionType *versiontype.VersionType `json:"version_type,omitempty"` @@ -164,8 +164,18 @@ func (s *IndexOperation) UnmarshalJSON(data []byte) error { // NewIndexOperation returns a IndexOperation. func NewIndexOperation() *IndexOperation { r := &IndexOperation{ - DynamicTemplates: make(map[string]string, 0), + DynamicTemplates: make(map[string]string), } return r } + +// true + +type IndexOperationVariant interface { + IndexOperationCaster() *IndexOperation +} + +func (s *IndexOperation) IndexOperationCaster() *IndexOperation { + return s +} diff --git a/typedapi/types/indexprivilegescheck.go b/typedapi/types/indexprivilegescheck.go index 16eb4e6cbd..97d6941dd2 100644 --- a/typedapi/types/indexprivilegescheck.go +++ b/typedapi/types/indexprivilegescheck.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,16 +33,16 @@ import ( // IndexPrivilegesCheck type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/has_privileges/types.ts#L33-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/has_privileges/types.ts#L34-L45 type IndexPrivilegesCheck struct { - // AllowRestrictedIndices This needs to be set to true (default is false) if using wildcards or regexps - // for patterns that cover restricted indices. + // AllowRestrictedIndices This needs to be set to `true` (default is `false`) if using wildcards or + // regexps for patterns that cover restricted indices. 
// Implicitly, restricted indices do not match index patterns because restricted // indices usually have limited privileges and including them in pattern tests // would render most such tests false. // If restricted indices are explicitly included in the names list, privileges // will be checked against them regardless of the value of - // allow_restricted_indices. + // `allow_restricted_indices`. AllowRestrictedIndices *bool `json:"allow_restricted_indices,omitempty"` // Names A list of indices. Names []string `json:"names"` @@ -111,3 +111,13 @@ func NewIndexPrivilegesCheck() *IndexPrivilegesCheck { return r } + +// true + +type IndexPrivilegesCheckVariant interface { + IndexPrivilegesCheckCaster() *IndexPrivilegesCheck +} + +func (s *IndexPrivilegesCheck) IndexPrivilegesCheckCaster() *IndexPrivilegesCheck { + return s +} diff --git a/typedapi/types/indexresult.go b/typedapi/types/indexresult.go index 031dff0b9d..ef14b72e9e 100644 --- a/typedapi/types/indexresult.go +++ b/typedapi/types/indexresult.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IndexResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L267-L269 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L267-L269 type IndexResult struct { Response IndexResultSummary `json:"response"` } @@ -33,3 +33,5 @@ func NewIndexResult() *IndexResult { return r } + +// false diff --git a/typedapi/types/indexresultsummary.go b/typedapi/types/indexresultsummary.go index a7e27839c1..e338aabe0e 100644 --- a/typedapi/types/indexresultsummary.go +++ b/typedapi/types/indexresultsummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // IndexResultSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L271-L277 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L271-L277 type IndexResultSummary struct { Created bool `json:"created"` Id string `json:"id"` @@ -102,3 +102,5 @@ func NewIndexResultSummary() *IndexResultSummary { return r } + +// false diff --git a/typedapi/types/indexrouting.go b/typedapi/types/indexrouting.go index f0f28b4d3f..62d583a218 100644 --- a/typedapi/types/indexrouting.go +++ b/typedapi/types/indexrouting.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IndexRouting type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexRouting.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexRouting.ts#L22-L25 type IndexRouting struct { Allocation *IndexRoutingAllocation `json:"allocation,omitempty"` Rebalance *IndexRoutingRebalance `json:"rebalance,omitempty"` @@ -34,3 +34,13 @@ func NewIndexRouting() *IndexRouting { return r } + +// true + +type IndexRoutingVariant interface { + IndexRoutingCaster() *IndexRouting +} + +func (s *IndexRouting) IndexRoutingCaster() *IndexRouting { + return s +} diff --git a/typedapi/types/indexroutingallocation.go b/typedapi/types/indexroutingallocation.go index 83e9937a55..71d472fa92 100644 --- a/typedapi/types/indexroutingallocation.go +++ b/typedapi/types/indexroutingallocation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // IndexRoutingAllocation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexRouting.ts#L27-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexRouting.ts#L27-L32 type IndexRoutingAllocation struct { Disk *IndexRoutingAllocationDisk `json:"disk,omitempty"` Enable *indexroutingallocationoptions.IndexRoutingAllocationOptions `json:"enable,omitempty"` @@ -40,3 +40,13 @@ func NewIndexRoutingAllocation() *IndexRoutingAllocation { return r } + +// true + +type IndexRoutingAllocationVariant interface { + IndexRoutingAllocationCaster() *IndexRoutingAllocation +} + +func (s *IndexRoutingAllocation) IndexRoutingAllocationCaster() *IndexRoutingAllocation { + return s +} diff --git a/typedapi/types/indexroutingallocationdisk.go b/typedapi/types/indexroutingallocationdisk.go index 7948124b06..6ef5e084a5 100644 --- a/typedapi/types/indexroutingallocationdisk.go +++ b/typedapi/types/indexroutingallocationdisk.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndexRoutingAllocationDisk type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexRouting.ts#L62-L64 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexRouting.ts#L62-L64 type IndexRoutingAllocationDisk struct { ThresholdEnabled string `json:"threshold_enabled,omitempty"` } @@ -74,3 +74,13 @@ func NewIndexRoutingAllocationDisk() *IndexRoutingAllocationDisk { return r } + +// true + +type IndexRoutingAllocationDiskVariant interface { + IndexRoutingAllocationDiskCaster() *IndexRoutingAllocationDisk +} + +func (s *IndexRoutingAllocationDisk) IndexRoutingAllocationDiskCaster() *IndexRoutingAllocationDisk { + return s +} diff --git a/typedapi/types/indexroutingallocationinclude.go b/typedapi/types/indexroutingallocationinclude.go index 81fcdcfdbe..a9cac9a91a 100644 --- a/typedapi/types/indexroutingallocationinclude.go +++ b/typedapi/types/indexroutingallocationinclude.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndexRoutingAllocationInclude type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexRouting.ts#L52-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexRouting.ts#L52-L55 type IndexRoutingAllocationInclude struct { Id_ *string `json:"_id,omitempty"` TierPreference_ *string `json:"_tier_preference,omitempty"` @@ -80,3 +80,13 @@ func NewIndexRoutingAllocationInclude() *IndexRoutingAllocationInclude { return r } + +// true + +type IndexRoutingAllocationIncludeVariant interface { + IndexRoutingAllocationIncludeCaster() *IndexRoutingAllocationInclude +} + +func (s *IndexRoutingAllocationInclude) IndexRoutingAllocationIncludeCaster() *IndexRoutingAllocationInclude { + return s +} diff --git a/typedapi/types/indexroutingallocationinitialrecovery.go b/typedapi/types/indexroutingallocationinitialrecovery.go index 1449cd8c98..5a26376e76 100644 --- a/typedapi/types/indexroutingallocationinitialrecovery.go +++ b/typedapi/types/indexroutingallocationinitialrecovery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // IndexRoutingAllocationInitialRecovery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexRouting.ts#L57-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexRouting.ts#L57-L59 type IndexRoutingAllocationInitialRecovery struct { Id_ *string `json:"_id,omitempty"` } @@ -66,3 +66,13 @@ func NewIndexRoutingAllocationInitialRecovery() *IndexRoutingAllocationInitialRe return r } + +// true + +type IndexRoutingAllocationInitialRecoveryVariant interface { + IndexRoutingAllocationInitialRecoveryCaster() *IndexRoutingAllocationInitialRecovery +} + +func (s *IndexRoutingAllocationInitialRecovery) IndexRoutingAllocationInitialRecoveryCaster() *IndexRoutingAllocationInitialRecovery { + return s +} diff --git a/typedapi/types/indexroutingrebalance.go b/typedapi/types/indexroutingrebalance.go index 6c1e0f854f..e6545e7ef7 100644 --- a/typedapi/types/indexroutingrebalance.go +++ b/typedapi/types/indexroutingrebalance.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // IndexRoutingRebalance type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexRouting.ts#L34-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexRouting.ts#L34-L36 type IndexRoutingRebalance struct { Enable indexroutingrebalanceoptions.IndexRoutingRebalanceOptions `json:"enable"` } @@ -37,3 +37,13 @@ func NewIndexRoutingRebalance() *IndexRoutingRebalance { return r } + +// true + +type IndexRoutingRebalanceVariant interface { + IndexRoutingRebalanceCaster() *IndexRoutingRebalance +} + +func (s *IndexRoutingRebalance) IndexRoutingRebalanceCaster() *IndexRoutingRebalance { + return s +} diff --git a/typedapi/types/indexsegment.go b/typedapi/types/indexsegment.go index 338bb0e741..ec7a83f9a9 100644 --- a/typedapi/types/indexsegment.go +++ b/typedapi/types/indexsegment.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IndexSegment type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/segments/types.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/segments/types.ts#L24-L26 type IndexSegment struct { Shards map[string][]ShardsSegment `json:"shards"` } @@ -30,8 +30,10 @@ type IndexSegment struct { // NewIndexSegment returns a IndexSegment. 
func NewIndexSegment() *IndexSegment { r := &IndexSegment{ - Shards: make(map[string][]ShardsSegment, 0), + Shards: make(map[string][]ShardsSegment), } return r } + +// false diff --git a/typedapi/types/indexsegmentsort.go b/typedapi/types/indexsegmentsort.go index 641185056a..5b5a687d61 100644 --- a/typedapi/types/indexsegmentsort.go +++ b/typedapi/types/indexsegmentsort.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // IndexSegmentSort type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSegmentSort.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSegmentSort.ts#L22-L27 type IndexSegmentSort struct { Field []string `json:"field,omitempty"` Missing []segmentsortmissing.SegmentSortMissing `json:"missing,omitempty"` @@ -132,3 +132,13 @@ func NewIndexSegmentSort() *IndexSegmentSort { return r } + +// true + +type IndexSegmentSortVariant interface { + IndexSegmentSortCaster() *IndexSegmentSort +} + +func (s *IndexSegmentSort) IndexSegmentSortCaster() *IndexSegmentSort { + return s +} diff --git a/typedapi/types/indexsettingblocks.go b/typedapi/types/indexsettingblocks.go index 12e98f693f..51d233f6e7 100644 --- a/typedapi/types/indexsettingblocks.go +++ b/typedapi/types/indexsettingblocks.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // IndexSettingBlocks type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L254-L260 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L262-L268 type IndexSettingBlocks struct { Metadata Stringifiedboolean `json:"metadata,omitempty"` Read Stringifiedboolean `json:"read,omitempty"` @@ -90,3 +90,13 @@ func NewIndexSettingBlocks() *IndexSettingBlocks { return r } + +// true + +type IndexSettingBlocksVariant interface { + IndexSettingBlocksCaster() *IndexSettingBlocks +} + +func (s *IndexSettingBlocks) IndexSettingBlocksCaster() *IndexSettingBlocks { + return s +} diff --git a/typedapi/types/indexsettings.go b/typedapi/types/indexsettings.go index 3529b0f30f..7407b95eac 100644 --- a/typedapi/types/indexsettings.go +++ b/typedapi/types/indexsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,13 +33,13 @@ import ( // IndexSettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L69-L169 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L70-L176 type IndexSettings struct { Analysis *IndexSettingsAnalysis `json:"analysis,omitempty"` // Analyze Settings to define analyzers, tokenizers, token filters and character // filters. Analyze *SettingsAnalyze `json:"analyze,omitempty"` - AutoExpandReplicas *string `json:"auto_expand_replicas,omitempty"` + AutoExpandReplicas any `json:"auto_expand_replicas,omitempty"` Blocks *IndexSettingBlocks `json:"blocks,omitempty"` CheckOnStartup *indexcheckonstartup.IndexCheckOnStartup `json:"check_on_startup,omitempty"` Codec *string `json:"codec,omitempty"` @@ -128,16 +128,9 @@ func (s *IndexSettings) UnmarshalJSON(data []byte) error { } case "auto_expand_replicas": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.AutoExpandReplicas); err != nil { return fmt.Errorf("%s | %w", "AutoExpandReplicas", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.AutoExpandReplicas = &o case "blocks": if err := dec.Decode(&s.Blocks); err != nil { @@ -572,55 +565,55 @@ func (s *IndexSettings) UnmarshalJSON(data []byte) error { case "BM25": oo := NewSettingsSimilarityBm25() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "boolean": oo := NewSettingsSimilarityBoolean() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "DFI": oo := NewSettingsSimilarityDfi() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "DFR": oo := 
NewSettingsSimilarityDfr() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "IB": oo := NewSettingsSimilarityIb() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "LMDirichlet": oo := NewSettingsSimilarityLmd() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "LMJelinekMercer": oo := NewSettingsSimilarityLmj() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo case "scripted": oo := NewSettingsSimilarityScripted() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Similarity | %w", err) } s.Similarity[key] = oo default: oo := new(SettingsSimilarity) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(SettingsSimilarity) | %w", err) } s.Similarity[key] = oo } @@ -739,9 +732,19 @@ func (s IndexSettings) MarshalJSON() ([]byte, error) { // NewIndexSettings returns a IndexSettings. func NewIndexSettings() *IndexSettings { r := &IndexSettings{ - IndexSettings: make(map[string]json.RawMessage, 0), - Similarity: make(map[string]SettingsSimilarity, 0), + IndexSettings: make(map[string]json.RawMessage), + Similarity: make(map[string]SettingsSimilarity), } return r } + +// true + +type IndexSettingsVariant interface { + IndexSettingsCaster() *IndexSettings +} + +func (s *IndexSettings) IndexSettingsCaster() *IndexSettings { + return s +} diff --git a/typedapi/types/indexsettingsanalysis.go b/typedapi/types/indexsettingsanalysis.go index 8b9ccb616a..e6f293cc5d 100644 --- a/typedapi/types/indexsettingsanalysis.go +++ b/typedapi/types/indexsettingsanalysis.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -24,12 +24,13 @@ import ( "bytes" "encoding/json" "errors" + "fmt" "io" ) // IndexSettingsAnalysis type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L319-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L333-L339 type IndexSettingsAnalysis struct { Analyzer map[string]Analyzer `json:"analyzer,omitempty"` CharFilter map[string]CharFilter `json:"char_filter,omitempty"` @@ -72,91 +73,301 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { case "custom": oo := NewCustomAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "fingerprint": oo := NewFingerprintAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "keyword": oo := NewKeywordAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err - } - s.Analyzer[key] = oo - case "language": - oo := NewLanguageAnalyzer() - if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "nori": oo := NewNoriAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "pattern": oo := NewPatternAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "simple": oo := NewSimpleAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | 
%w", err) } s.Analyzer[key] = oo case "standard": oo := NewStandardAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "stop": oo := NewStopAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "whitespace": oo := NewWhitespaceAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "icu_analyzer": oo := NewIcuAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "kuromoji": oo := NewKuromojiAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "snowball": oo := NewSnowballAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "arabic": + oo := NewArabicAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "armenian": + oo := NewArmenianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "basque": + oo := NewBasqueAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "bengali": + oo := NewBengaliAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "brazilian": + oo := NewBrazilianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "bulgarian": + oo := NewBulgarianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + 
} + s.Analyzer[key] = oo + case "catalan": + oo := NewCatalanAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "chinese": + oo := NewChineseAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "cjk": + oo := NewCjkAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "czech": + oo := NewCzechAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "danish": + oo := NewDanishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo case "dutch": oo := NewDutchAnalyzer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "english": + oo := NewEnglishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "estonian": + oo := NewEstonianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "finnish": + oo := NewFinnishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "french": + oo := NewFrenchAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "galician": + oo := NewGalicianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "german": + oo := NewGermanAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "greek": + oo := 
NewGreekAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "hindi": + oo := NewHindiAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "hungarian": + oo := NewHungarianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "indonesian": + oo := NewIndonesianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "irish": + oo := NewIrishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "italian": + oo := NewItalianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "latvian": + oo := NewLatvianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "lithuanian": + oo := NewLithuanianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "norwegian": + oo := NewNorwegianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "persian": + oo := NewPersianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "portuguese": + oo := NewPortugueseAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "romanian": + oo := NewRomanianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "russian": + oo := NewRussianAnalyzer() + if 
err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "serbian": + oo := NewSerbianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "sorani": + oo := NewSoraniAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "spanish": + oo := NewSpanishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "swedish": + oo := NewSwedishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "turkish": + oo := NewTurkishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "thai": + oo := NewThaiAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) } s.Analyzer[key] = oo default: oo := new(Analyzer) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Analyzer) | %w", err) } s.Analyzer[key] = oo } @@ -179,37 +390,37 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { case "html_strip": oo := NewHtmlStripCharFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("CharFilter | %w", err) } s.CharFilter[key] = oo case "mapping": oo := NewMappingCharFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("CharFilter | %w", err) } s.CharFilter[key] = oo case "pattern_replace": oo := NewPatternReplaceCharFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("CharFilter | %w", err) } s.CharFilter[key] = oo case "icu_normalizer": oo := NewIcuNormalizationCharFilter() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("CharFilter | %w", err) } s.CharFilter[key] = oo case "kuromoji_iteration_mark": oo := NewKuromojiIterationMarkCharFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("CharFilter | %w", err) } s.CharFilter[key] = oo default: oo := new(CharFilter) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(CharFilter) | %w", err) } s.CharFilter[key] = oo } @@ -232,289 +443,289 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { case "asciifolding": oo := NewAsciiFoldingTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "common_grams": oo := NewCommonGramsTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "condition": oo := NewConditionTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "delimited_payload": oo := NewDelimitedPayloadTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "edge_ngram": oo := NewEdgeNGramTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "elision": oo := NewElisionTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "fingerprint": oo := NewFingerprintTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "hunspell": oo := NewHunspellTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "hyphenation_decompounder": oo := NewHyphenationDecompounderTokenFilter() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "keep_types": oo := NewKeepTypesTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "keep": oo := NewKeepWordsTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "keyword_marker": oo := NewKeywordMarkerTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "kstem": oo := NewKStemTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "length": oo := NewLengthTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "limit": oo := NewLimitTokenCountTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "lowercase": oo := NewLowercaseTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "multiplexer": oo := NewMultiplexerTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "ngram": oo := NewNGramTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "nori_part_of_speech": oo := NewNoriPartOfSpeechTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "pattern_capture": oo := NewPatternCaptureTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case 
"pattern_replace": oo := NewPatternReplaceTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "porter_stem": oo := NewPorterStemTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "predicate_token_filter": oo := NewPredicateTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "remove_duplicates": oo := NewRemoveDuplicatesTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "reverse": oo := NewReverseTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "shingle": oo := NewShingleTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "snowball": oo := NewSnowballTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "stemmer_override": oo := NewStemmerOverrideTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "stemmer": oo := NewStemmerTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "stop": oo := NewStopTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "synonym_graph": oo := NewSynonymGraphTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "synonym": oo := NewSynonymTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "trim": oo := NewTrimTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "truncate": oo := NewTruncateTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "unique": oo := NewUniqueTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "uppercase": oo := NewUppercaseTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "word_delimiter_graph": oo := NewWordDelimiterGraphTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "word_delimiter": oo := NewWordDelimiterTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "kuromoji_stemmer": oo := NewKuromojiStemmerTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "kuromoji_readingform": oo := NewKuromojiReadingFormTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "kuromoji_part_of_speech": oo := NewKuromojiPartOfSpeechTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "icu_collation": oo := NewIcuCollationTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "icu_folding": oo := NewIcuFoldingTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo 
case "icu_normalizer": oo := NewIcuNormalizationTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "icu_transform": oo := NewIcuTransformTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "phonetic": oo := NewPhoneticTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo case "dictionary_decompounder": oo := NewDictionaryDecompounderTokenFilter() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Filter | %w", err) } s.Filter[key] = oo default: oo := new(TokenFilter) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(TokenFilter) | %w", err) } s.Filter[key] = oo } @@ -539,19 +750,19 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { case "lowercase": oo := NewLowercaseNormalizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Normalizer | %w", err) } s.Normalizer[key] = oo case "custom": oo := NewCustomNormalizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Normalizer | %w", err) } s.Normalizer[key] = oo default: oo := new(Normalizer) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Normalizer) | %w", err) } s.Normalizer[key] = oo } @@ -574,91 +785,115 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { case "char_group": oo := NewCharGroupTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "classic": + oo := NewClassicTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "edge_ngram": oo := NewEdgeNGramTokenizer() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "keyword": oo := NewKeywordTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "letter": oo := NewLetterTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "lowercase": oo := NewLowercaseTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "ngram": oo := NewNGramTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo - case "nori_tokenizer": - oo := NewNoriTokenizer() + case "path_hierarchy": + oo := NewPathHierarchyTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo - case "path_hierarchy": - oo := NewPathHierarchyTokenizer() + case "pattern": + oo := NewPatternTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "simple_pattern": + oo := NewSimplePatternTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "simple_pattern_split": + oo := NewSimplePatternSplitTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "standard": oo := NewStandardTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "thai": + oo := NewThaiTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "uax_url_email": oo := NewUaxEmailUrlTokenizer() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo case "whitespace": oo := NewWhitespaceTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo - case "kuromoji_tokenizer": - oo := NewKuromojiTokenizer() + case "icu_tokenizer": + oo := NewIcuTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo - case "pattern": - oo := NewPatternTokenizer() + case "kuromoji_tokenizer": + oo := NewKuromojiTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo - case "icu_tokenizer": - oo := NewIcuTokenizer() + case "nori_tokenizer": + oo := NewNoriTokenizer() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Tokenizer | %w", err) } s.Tokenizer[key] = oo default: oo := new(Tokenizer) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Tokenizer) | %w", err) } s.Tokenizer[key] = oo } @@ -672,12 +907,22 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { // NewIndexSettingsAnalysis returns a IndexSettingsAnalysis. 
func NewIndexSettingsAnalysis() *IndexSettingsAnalysis { r := &IndexSettingsAnalysis{ - Analyzer: make(map[string]Analyzer, 0), - CharFilter: make(map[string]CharFilter, 0), - Filter: make(map[string]TokenFilter, 0), - Normalizer: make(map[string]Normalizer, 0), - Tokenizer: make(map[string]Tokenizer, 0), + Analyzer: make(map[string]Analyzer), + CharFilter: make(map[string]CharFilter), + Filter: make(map[string]TokenFilter), + Normalizer: make(map[string]Normalizer), + Tokenizer: make(map[string]Tokenizer), } return r } + +// true + +type IndexSettingsAnalysisVariant interface { + IndexSettingsAnalysisCaster() *IndexSettingsAnalysis +} + +func (s *IndexSettingsAnalysis) IndexSettingsAnalysisCaster() *IndexSettingsAnalysis { + return s +} diff --git a/typedapi/types/indexsettingslifecycle.go b/typedapi/types/indexsettingslifecycle.go index 2c0672d365..50a99ad1ea 100644 --- a/typedapi/types/indexsettingslifecycle.go +++ b/typedapi/types/indexsettingslifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndexSettingsLifecycle type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L276-L309 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L284-L323 type IndexSettingsLifecycle struct { // IndexingComplete Indicates whether or not the index has been rolled over. Automatically set to // true when ILM completes the rollover action. @@ -55,6 +55,10 @@ type IndexSettingsLifecycle struct { // for example logs-2016.10.31-000002). 
If the index name doesn’t match the // pattern, index creation fails. ParseOriginationDate *bool `json:"parse_origination_date,omitempty"` + // PreferIlm Preference for the system that manages a data stream backing index + // (preferring ILM when both ILM and DLM are + // applicable for an index). + PreferIlm string `json:"prefer_ilm,omitempty"` // RolloverAlias The index alias to update when the index rolls over. Specify when using a // policy that contains a rollover action. // When the index rolls over, the alias is updated to reflect that the index is @@ -118,6 +122,18 @@ func (s *IndexSettingsLifecycle) UnmarshalJSON(data []byte) error { s.ParseOriginationDate = &v } + case "prefer_ilm": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PreferIlm", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PreferIlm = o + case "rollover_alias": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -146,3 +162,13 @@ func NewIndexSettingsLifecycle() *IndexSettingsLifecycle { return r } + +// true + +type IndexSettingsLifecycleVariant interface { + IndexSettingsLifecycleCaster() *IndexSettingsLifecycle +} + +func (s *IndexSettingsLifecycle) IndexSettingsLifecycleCaster() *IndexSettingsLifecycle { + return s +} diff --git a/typedapi/types/indexsettingslifecyclestep.go b/typedapi/types/indexsettingslifecyclestep.go index 74892f3e5f..5033145426 100644 --- a/typedapi/types/indexsettingslifecyclestep.go +++ b/typedapi/types/indexsettingslifecyclestep.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // IndexSettingsLifecycleStep type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L311-L317 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L325-L331 type IndexSettingsLifecycleStep struct { // WaitTimeThreshold Time to wait for the cluster to resolve allocation issues during an ILM // shrink action. Must be greater than 1h (1 hour). @@ -69,3 +69,13 @@ func NewIndexSettingsLifecycleStep() *IndexSettingsLifecycleStep { return r } + +// true + +type IndexSettingsLifecycleStepVariant interface { + IndexSettingsLifecycleStepCaster() *IndexSettingsLifecycleStep +} + +func (s *IndexSettingsLifecycleStep) IndexSettingsLifecycleStepCaster() *IndexSettingsLifecycleStep { + return s +} diff --git a/typedapi/types/indexsettingstimeseries.go b/typedapi/types/indexsettingstimeseries.go index e4b1701aa8..084980ce5e 100644 --- a/typedapi/types/indexsettingstimeseries.go +++ b/typedapi/types/indexsettingstimeseries.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // IndexSettingsTimeSeries type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L327-L330 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L341-L344 type IndexSettingsTimeSeries struct { EndTime DateTime `json:"end_time,omitempty"` StartTime DateTime `json:"start_time,omitempty"` @@ -72,3 +72,13 @@ func NewIndexSettingsTimeSeries() *IndexSettingsTimeSeries { return r } + +// true + +type IndexSettingsTimeSeriesVariant interface { + IndexSettingsTimeSeriesCaster() *IndexSettingsTimeSeries +} + +func (s *IndexSettingsTimeSeries) IndexSettingsTimeSeriesCaster() *IndexSettingsTimeSeries { + return s +} diff --git a/typedapi/types/indexstate.go b/typedapi/types/indexstate.go index 5da696984d..ca3a78ce41 100644 --- a/typedapi/types/indexstate.go +++ b/typedapi/types/indexstate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // IndexState type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexState.ts#L27-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexState.ts#L27-L40 type IndexState struct { Aliases map[string]Alias `json:"aliases,omitempty"` DataStream *string `json:"data_stream,omitempty"` @@ -98,8 +98,18 @@ func (s *IndexState) UnmarshalJSON(data []byte) error { // NewIndexState returns a IndexState. 
func NewIndexState() *IndexState { r := &IndexState{ - Aliases: make(map[string]Alias, 0), + Aliases: make(map[string]Alias), } return r } + +// true + +type IndexStateVariant interface { + IndexStateCaster() *IndexState +} + +func (s *IndexState) IndexStateCaster() *IndexState { + return s +} diff --git a/typedapi/types/indexstats.go b/typedapi/types/indexstats.go index 858733abbb..dac8f114de 100644 --- a/typedapi/types/indexstats.go +++ b/typedapi/types/indexstats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IndexStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L52-L93 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L52-L93 type IndexStats struct { Bulk *BulkStats `json:"bulk,omitempty"` // Completion Contains statistics about completions across all shards assigned to the node. @@ -72,3 +72,5 @@ func NewIndexStats() *IndexStats { return r } + +// false diff --git a/typedapi/types/indextemplate.go b/typedapi/types/indextemplate.go index ecbbc53ff0..d334ee64bd 100644 --- a/typedapi/types/indextemplate.go +++ b/typedapi/types/indextemplate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndexTemplate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexTemplate.ts#L31-L70 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexTemplate.ts#L28-L81 type IndexTemplate struct { AllowAutoCreate *bool `json:"allow_auto_create,omitempty"` // ComposedOf An ordered list of component template names. @@ -43,6 +43,13 @@ type IndexTemplate struct { // Supports an empty object. // Data streams require a matching index template with a `data_stream` object. DataStream *IndexTemplateDataStreamConfiguration `json:"data_stream,omitempty"` + // Deprecated Marks this index template as deprecated. + // When creating or updating a non-deprecated index template that uses + // deprecated components, + // Elasticsearch will emit a deprecation warning. + Deprecated *bool `json:"deprecated,omitempty"` + // IgnoreMissingComponentTemplates A list of component template names that are allowed to be absent. + IgnoreMissingComponentTemplates []string `json:"ignore_missing_component_templates,omitempty"` // IndexPatterns Name of the index template. IndexPatterns []string `json:"index_patterns"` // Meta_ Optional user metadata about the index template. May have any contents. 
@@ -103,6 +110,36 @@ func (s *IndexTemplate) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "DataStream", err) } + case "deprecated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deprecated", err) + } + s.Deprecated = &value + case bool: + s.Deprecated = &v + } + + case "ignore_missing_component_templates": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissingComponentTemplates", err) + } + + s.IgnoreMissingComponentTemplates = append(s.IgnoreMissingComponentTemplates, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.IgnoreMissingComponentTemplates); err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissingComponentTemplates", err) + } + } + case "index_patterns": rawMsg := json.RawMessage{} dec.Decode(&rawMsg) @@ -160,3 +197,13 @@ func NewIndexTemplate() *IndexTemplate { return r } + +// true + +type IndexTemplateVariant interface { + IndexTemplateCaster() *IndexTemplate +} + +func (s *IndexTemplate) IndexTemplateCaster() *IndexTemplate { + return s +} diff --git a/typedapi/types/indextemplatedatastreamconfiguration.go b/typedapi/types/indextemplatedatastreamconfiguration.go index c33afcdd17..be96b03d79 100644 --- a/typedapi/types/indextemplatedatastreamconfiguration.go +++ b/typedapi/types/indextemplatedatastreamconfiguration.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndexTemplateDataStreamConfiguration type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexTemplate.ts#L72-L83 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexTemplate.ts#L83-L94 type IndexTemplateDataStreamConfiguration struct { // AllowCustomRouting If true, the data stream supports custom routing. AllowCustomRouting *bool `json:"allow_custom_routing,omitempty"` @@ -93,3 +93,13 @@ func NewIndexTemplateDataStreamConfiguration() *IndexTemplateDataStreamConfigura return r } + +// true + +type IndexTemplateDataStreamConfigurationVariant interface { + IndexTemplateDataStreamConfigurationCaster() *IndexTemplateDataStreamConfiguration +} + +func (s *IndexTemplateDataStreamConfiguration) IndexTemplateDataStreamConfigurationCaster() *IndexTemplateDataStreamConfiguration { + return s +} diff --git a/typedapi/types/indextemplateitem.go b/typedapi/types/indextemplateitem.go index 33c586e5f0..bbce555c1e 100644 --- a/typedapi/types/indextemplateitem.go +++ b/typedapi/types/indextemplateitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // IndexTemplateItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L29-L32 type IndexTemplateItem struct { IndexTemplate IndexTemplate `json:"index_template"` Name string `json:"name"` @@ -72,3 +72,5 @@ func NewIndexTemplateItem() *IndexTemplateItem { return r } + +// false diff --git a/typedapi/types/indextemplatemapping.go b/typedapi/types/indextemplatemapping.go index 8fb8505695..bc7fa102d5 100644 --- a/typedapi/types/indextemplatemapping.go +++ b/typedapi/types/indextemplatemapping.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IndexTemplateMapping type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L121-L143 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L159-L181 type IndexTemplateMapping struct { // Aliases Aliases to add. // If the index template includes a `data_stream` object, these are data stream @@ -43,8 +43,18 @@ type IndexTemplateMapping struct { // NewIndexTemplateMapping returns a IndexTemplateMapping. 
func NewIndexTemplateMapping() *IndexTemplateMapping { r := &IndexTemplateMapping{ - Aliases: make(map[string]Alias, 0), + Aliases: make(map[string]Alias), } return r } + +// true + +type IndexTemplateMappingVariant interface { + IndexTemplateMappingCaster() *IndexTemplateMapping +} + +func (s *IndexTemplateMapping) IndexTemplateMappingCaster() *IndexTemplateMapping { + return s +} diff --git a/typedapi/types/indextemplatesummary.go b/typedapi/types/indextemplatesummary.go index 9a88c18f54..a5dee8632f 100644 --- a/typedapi/types/indextemplatesummary.go +++ b/typedapi/types/indextemplatesummary.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IndexTemplateSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexTemplate.ts#L85-L107 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexTemplate.ts#L96-L118 type IndexTemplateSummary struct { // Aliases Aliases to add. // If the index template includes a `data_stream` object, these are data stream @@ -43,8 +43,18 @@ type IndexTemplateSummary struct { // NewIndexTemplateSummary returns a IndexTemplateSummary. 
func NewIndexTemplateSummary() *IndexTemplateSummary { r := &IndexTemplateSummary{ - Aliases: make(map[string]Alias, 0), + Aliases: make(map[string]Alias), } return r } + +// true + +type IndexTemplateSummaryVariant interface { + IndexTemplateSummaryCaster() *IndexTemplateSummary +} + +func (s *IndexTemplateSummary) IndexTemplateSummaryCaster() *IndexTemplateSummary { + return s +} diff --git a/typedapi/types/indexversioning.go b/typedapi/types/indexversioning.go index 81c6e355f7..f1b69ba673 100644 --- a/typedapi/types/indexversioning.go +++ b/typedapi/types/indexversioning.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndexVersioning type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L271-L274 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L279-L282 type IndexVersioning struct { Created *string `json:"created,omitempty"` CreatedString *string `json:"created_string,omitempty"` @@ -80,3 +80,13 @@ func NewIndexVersioning() *IndexVersioning { return r } + +// true + +type IndexVersioningVariant interface { + IndexVersioningCaster() *IndexVersioning +} + +func (s *IndexVersioning) IndexVersioningCaster() *IndexVersioning { + return s +} diff --git a/typedapi/types/indicatornode.go b/typedapi/types/indicatornode.go index 0d7b82dfb5..b1eb81a4a5 100644 --- a/typedapi/types/indicatornode.go +++ b/typedapi/types/indicatornode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndicatorNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L90-L93 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L91-L94 type IndicatorNode struct { Name *string `json:"name,omitempty"` NodeId *string `json:"node_id,omitempty"` @@ -87,3 +87,5 @@ func NewIndicatorNode() *IndicatorNode { return r } + +// false diff --git a/typedapi/types/indicators.go b/typedapi/types/indicators.go index ec613e257b..c20a7a5c74 100644 --- a/typedapi/types/indicators.go +++ b/typedapi/types/indicators.go @@ -16,14 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Indicators type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L32-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L32-L41 type Indicators struct { + DataStreamLifecycle *DataStreamLifecycleIndicator `json:"data_stream_lifecycle,omitempty"` Disk *DiskIndicator `json:"disk,omitempty"` Ilm *IlmIndicator `json:"ilm,omitempty"` MasterIsStable *MasterIsStableIndicator `json:"master_is_stable,omitempty"` @@ -39,3 +40,5 @@ func NewIndicators() *Indicators { return r } + +// false diff --git a/typedapi/types/indices.go b/typedapi/types/indices.go index e15ac32f2d..6bb5467a23 100644 --- a/typedapi/types/indices.go +++ b/typedapi/types/indices.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Indices type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L67-L67 type Indices []string + +type IndicesVariant interface { + IndicesCaster() *Indices +} diff --git a/typedapi/types/indicesaction.go b/typedapi/types/indicesaction.go index 3535f56933..ed7b2ec837 100644 --- a/typedapi/types/indicesaction.go +++ b/typedapi/types/indicesaction.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // IndicesAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/update_aliases/types.ts#L23-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/update_aliases/types.ts#L23-L39 type IndicesAction struct { // Add Adds a data stream or index to an alias. // If the alias doesn’t exist, the `add` action creates it. - Add *AddAction `json:"add,omitempty"` + Add *AddAction `json:"add,omitempty"` + AdditionalIndicesActionProperty map[string]json.RawMessage `json:"-"` // Remove Removes a data stream or index from an alias. Remove *RemoveAction `json:"remove,omitempty"` // RemoveIndex Deletes an index. @@ -34,9 +40,50 @@ type IndicesAction struct { RemoveIndex *RemoveIndexAction `json:"remove_index,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s IndicesAction) MarshalJSON() ([]byte, error) { + type opt IndicesAction + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIndicesActionProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalIndicesActionProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewIndicesAction returns a IndicesAction. 
func NewIndicesAction() *IndicesAction { - r := &IndicesAction{} + r := &IndicesAction{ + AdditionalIndicesActionProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type IndicesActionVariant interface { + IndicesActionCaster() *IndicesAction +} + +func (s *IndicesAction) IndicesActionCaster() *IndicesAction { + return s +} diff --git a/typedapi/types/indicesblockstatus.go b/typedapi/types/indicesblockstatus.go index 3bc6963147..98b3a2a615 100644 --- a/typedapi/types/indicesblockstatus.go +++ b/typedapi/types/indicesblockstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndicesBlockStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/add_block/IndicesAddBlockResponse.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/add_block/IndicesAddBlockResponse.ts#L30-L33 type IndicesBlockStatus struct { Blocked bool `json:"blocked"` Name string `json:"name"` @@ -82,3 +82,5 @@ func NewIndicesBlockStatus() *IndicesBlockStatus { return r } + +// false diff --git a/typedapi/types/indicesindexingpressure.go b/typedapi/types/indicesindexingpressure.go index baf383ef55..d3c8d00b75 100644 --- a/typedapi/types/indicesindexingpressure.go +++ b/typedapi/types/indicesindexingpressure.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IndicesIndexingPressure type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L550-L552 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L575-L577 type IndicesIndexingPressure struct { Memory IndicesIndexingPressureMemory `json:"memory"` } @@ -33,3 +33,13 @@ func NewIndicesIndexingPressure() *IndicesIndexingPressure { return r } + +// true + +type IndicesIndexingPressureVariant interface { + IndicesIndexingPressureCaster() *IndicesIndexingPressure +} + +func (s *IndicesIndexingPressure) IndicesIndexingPressureCaster() *IndicesIndexingPressure { + return s +} diff --git a/typedapi/types/indicesindexingpressurememory.go b/typedapi/types/indicesindexingpressurememory.go index 1a30a65f70..330e637ccc 100644 --- a/typedapi/types/indicesindexingpressurememory.go +++ b/typedapi/types/indicesindexingpressurememory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndicesIndexingPressureMemory type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L554-L561 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L579-L586 type IndicesIndexingPressureMemory struct { // Limit Number of outstanding bytes that may be consumed by indexing requests. When // this limit is reached or exceeded, @@ -83,3 +83,13 @@ func NewIndicesIndexingPressureMemory() *IndicesIndexingPressureMemory { return r } + +// true + +type IndicesIndexingPressureMemoryVariant interface { + IndicesIndexingPressureMemoryCaster() *IndicesIndexingPressureMemory +} + +func (s *IndicesIndexingPressureMemory) IndicesIndexingPressureMemoryCaster() *IndicesIndexingPressureMemory { + return s +} diff --git a/typedapi/types/indicesmodifyaction.go b/typedapi/types/indicesmodifyaction.go index 213c45f183..d3e465adf6 100644 --- a/typedapi/types/indicesmodifyaction.go +++ b/typedapi/types/indicesmodifyaction.go @@ -16,29 +16,76 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // IndicesModifyAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/modify_data_stream/types.ts#L22-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/modify_data_stream/types.ts#L22-L37 type IndicesModifyAction struct { // AddBackingIndex Adds an existing index as a backing index for a data stream. // The index is hidden as part of this operation. 
// WARNING: Adding indices with the `add_backing_index` action can potentially // result in improper data stream behavior. // This should be considered an expert level API. - AddBackingIndex *IndexAndDataStreamAction `json:"add_backing_index,omitempty"` + AddBackingIndex *IndexAndDataStreamAction `json:"add_backing_index,omitempty"` + AdditionalIndicesModifyActionProperty map[string]json.RawMessage `json:"-"` // RemoveBackingIndex Removes a backing index from a data stream. // The index is unhidden as part of this operation. // A data stream’s write index cannot be removed. RemoveBackingIndex *IndexAndDataStreamAction `json:"remove_backing_index,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s IndicesModifyAction) MarshalJSON() ([]byte, error) { + type opt IndicesModifyAction + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIndicesModifyActionProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalIndicesModifyActionProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewIndicesModifyAction returns a IndicesModifyAction. 
func NewIndicesModifyAction() *IndicesModifyAction { - r := &IndicesModifyAction{} + r := &IndicesModifyAction{ + AdditionalIndicesModifyActionProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type IndicesModifyActionVariant interface { + IndicesModifyActionCaster() *IndicesModifyAction +} + +func (s *IndicesModifyAction) IndicesModifyActionCaster() *IndicesModifyAction { + return s +} diff --git a/typedapi/types/indicesoptions.go b/typedapi/types/indicesoptions.go index d371d0f553..26f40946f2 100644 --- a/typedapi/types/indicesoptions.go +++ b/typedapi/types/indicesoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // IndicesOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L335-L362 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L339-L366 type IndicesOptions struct { // AllowNoIndices If false, the request returns an error if any wildcard expression, index // alias, or `_all` value targets only @@ -138,3 +138,13 @@ func NewIndicesOptions() *IndicesOptions { return r } + +// true + +type IndicesOptionsVariant interface { + IndicesOptionsCaster() *IndicesOptions +} + +func (s *IndicesOptions) IndicesOptionsCaster() *IndicesOptions { + return s +} diff --git a/typedapi/types/indicesprivileges.go b/typedapi/types/indicesprivileges.go index 4adc91c616..6118948aa7 100644 --- a/typedapi/types/indicesprivileges.go +++ b/typedapi/types/indicesprivileges.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // IndicesPrivileges type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/Privileges.ts#L197-L221 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L216-L242 type IndicesPrivileges struct { // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that // cover restricted indices. Implicitly, restricted indices have limited @@ -91,19 +91,8 @@ func (s *IndicesPrivileges) UnmarshalJSON(data []byte) error { } case "names": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(string) - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Names", err) - } - - s.Names = append(s.Names, *o) - } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { - return fmt.Errorf("%s | %w", "Names", err) - } + if err := dec.Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) } case "privileges": @@ -129,7 +118,7 @@ func (s *IndicesPrivileges) UnmarshalJSON(data []byte) error { switch t { - case "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", 
"parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": o := NewQuery() localDec := json.NewDecoder(bytes.NewReader(message)) if err := localDec.Decode(&o); err != nil { @@ -167,3 +156,13 @@ func NewIndicesPrivileges() *IndicesPrivileges { return r } + +// true + +type IndicesPrivilegesVariant interface { + IndicesPrivilegesCaster() *IndicesPrivileges +} + +func (s *IndicesPrivileges) IndicesPrivilegesCaster() *IndicesPrivileges { + return s +} diff --git a/typedapi/types/indicesprivilegesquery.go b/typedapi/types/indicesprivilegesquery.go index c02c853ddf..dd1fcfc79a 100644 --- a/typedapi/types/indicesprivilegesquery.go +++ b/typedapi/types/indicesprivilegesquery.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,9 @@ package types // Query // RoleTemplateQuery // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/Privileges.ts#L247-L255 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L341-L349 type IndicesPrivilegesQuery any + +type IndicesPrivilegesQueryVariant interface { + IndicesPrivilegesQueryCaster() *IndicesPrivilegesQuery +} diff --git a/typedapi/types/indicesrecord.go b/typedapi/types/indicesrecord.go index 32d4116a56..fefb452d55 100644 --- a/typedapi/types/indicesrecord.go +++ b/typedapi/types/indicesrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndicesRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/indices/types.ts#L20-L801 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/indices/types.ts#L20-L808 type IndicesRecord struct { // BulkAvgSizeInBytes average size in bytes of shard bulk BulkAvgSizeInBytes *string `json:"bulk.avg_size_in_bytes,omitempty"` @@ -49,6 +49,8 @@ type IndicesRecord struct { CreationDate *string `json:"creation.date,omitempty"` // CreationDateString index creation date (as string) CreationDateString *string `json:"creation.date.string,omitempty"` + // DatasetSize total size of dataset (including the cache for partially mounted indices) + DatasetSize *string `json:"dataset.size,omitempty"` // DocsCount available docs DocsCount *string `json:"docs.count,omitempty"` // DocsDeleted deleted docs @@ -430,6 +432,18 @@ func (s *IndicesRecord) UnmarshalJSON(data []byte) error { } s.CreationDateString = &o + case "dataset.size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DatasetSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DatasetSize = &o + case "docs.count", "dc", "docsCount": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -2037,3 +2051,5 @@ func NewIndicesRecord() *IndicesRecord { return r } + +// false diff --git a/typedapi/types/indicesshardsstats.go b/typedapi/types/indicesshardsstats.go index 4b0f896e7d..73e898008b 100644 --- a/typedapi/types/indicesshardsstats.go +++ b/typedapi/types/indicesshardsstats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IndicesShardsStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L52-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L52-L55 type IndicesShardsStats struct { AllFields FieldSummary `json:"all_fields"` Fields map[string]FieldSummary `json:"fields"` @@ -31,8 +31,10 @@ type IndicesShardsStats struct { // NewIndicesShardsStats returns a IndicesShardsStats. func NewIndicesShardsStats() *IndicesShardsStats { r := &IndicesShardsStats{ - Fields: make(map[string]FieldSummary, 0), + Fields: make(map[string]FieldSummary), } return r } + +// false diff --git a/typedapi/types/indicesshardstats.go b/typedapi/types/indicesshardstats.go index b648c131f5..7c4a8d0d2e 100644 --- a/typedapi/types/indicesshardstats.go +++ b/typedapi/types/indicesshardstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // IndicesShardStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L192-L223 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L192-L223 type IndicesShardStats struct { Bulk *BulkStats `json:"bulk,omitempty"` Commit *ShardCommit `json:"commit,omitempty"` @@ -59,8 +59,10 @@ type IndicesShardStats struct { // NewIndicesShardStats returns a IndicesShardStats. func NewIndicesShardStats() *IndicesShardStats { r := &IndicesShardStats{ - Shards: make(map[string]json.RawMessage, 0), + Shards: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/indicesshardstores.go b/typedapi/types/indicesshardstores.go index 1f43ba9236..2fdbc74f73 100644 --- a/typedapi/types/indicesshardstores.go +++ b/typedapi/types/indicesshardstores.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IndicesShardStores type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/shard_stores/types.ts#L26-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/shard_stores/types.ts#L25-L27 type IndicesShardStores struct { Shards map[string]ShardStoreWrapper `json:"shards"` } @@ -30,8 +30,10 @@ type IndicesShardStores struct { // NewIndicesShardStores returns a IndicesShardStores. 
func NewIndicesShardStores() *IndicesShardStores { r := &IndicesShardStores{ - Shards: make(map[string]ShardStoreWrapper, 0), + Shards: make(map[string]ShardStoreWrapper), } return r } + +// false diff --git a/typedapi/types/indicesstats.go b/typedapi/types/indicesstats.go index 1e14481eb9..e7fc26699e 100644 --- a/typedapi/types/indicesstats.go +++ b/typedapi/types/indicesstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // IndicesStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L95-L110 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L95-L110 type IndicesStats struct { Health *healthstatus.HealthStatus `json:"health,omitempty"` Primaries *IndexStats `json:"primaries,omitempty"` @@ -99,8 +99,10 @@ func (s *IndicesStats) UnmarshalJSON(data []byte) error { // NewIndicesStats returns a IndicesStats. func NewIndicesStats() *IndicesStats { r := &IndicesStats{ - Shards: make(map[string][]IndicesShardStats, 0), + Shards: make(map[string][]IndicesShardStats), } return r } + +// false diff --git a/typedapi/types/indicesvalidationexplanation.go b/typedapi/types/indicesvalidationexplanation.go index 9b5f724e8c..8f283d561d 100644 --- a/typedapi/types/indicesvalidationexplanation.go +++ b/typedapi/types/indicesvalidationexplanation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndicesValidationExplanation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L32-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L32-L37 type IndicesValidationExplanation struct { Error *string `json:"error,omitempty"` Explanation *string `json:"explanation,omitempty"` @@ -108,3 +108,5 @@ func NewIndicesValidationExplanation() *IndicesValidationExplanation { return r } + +// false diff --git a/typedapi/types/indicesversions.go b/typedapi/types/indicesversions.go index 551cd0629d..156ec21822 100644 --- a/typedapi/types/indicesversions.go +++ b/typedapi/types/indicesversions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IndicesVersions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L263-L268 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L263-L268 type IndicesVersions struct { IndexCount int `json:"index_count"` PrimaryShardCount int `json:"primary_shard_count"` @@ -117,3 +117,5 @@ func NewIndicesVersions() *IndicesVersions { return r } + +// false diff --git a/typedapi/types/indonesiananalyzer.go b/typedapi/types/indonesiananalyzer.go new file mode 100644 index 0000000000..8c8830465f --- /dev/null +++ b/typedapi/types/indonesiananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IndonesianAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L193-L198 +type IndonesianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *IndonesianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IndonesianAnalyzer) MarshalJSON() ([]byte, error) { + type innerIndonesianAnalyzer IndonesianAnalyzer + tmp := innerIndonesianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = 
"indonesian" + + return json.Marshal(tmp) +} + +// NewIndonesianAnalyzer returns a IndonesianAnalyzer. +func NewIndonesianAnalyzer() *IndonesianAnalyzer { + r := &IndonesianAnalyzer{} + + return r +} + +// true + +type IndonesianAnalyzerVariant interface { + IndonesianAnalyzerCaster() *IndonesianAnalyzer +} + +func (s *IndonesianAnalyzer) IndonesianAnalyzerCaster() *IndonesianAnalyzer { + return s +} diff --git a/typedapi/types/inferenceaggregate.go b/typedapi/types/inferenceaggregate.go index c09c1b593d..58b31be433 100644 --- a/typedapi/types/inferenceaggregate.go +++ b/typedapi/types/inferenceaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // InferenceAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L663-L677 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L755-L770 type InferenceAggregate struct { Data map[string]json.RawMessage `json:"-"` FeatureImportance []InferenceFeatureImportance `json:"feature_importance,omitempty"` @@ -138,8 +138,10 @@ func (s InferenceAggregate) MarshalJSON() ([]byte, error) { // NewInferenceAggregate returns a InferenceAggregate. 
func NewInferenceAggregate() *InferenceAggregate { r := &InferenceAggregate{ - Data: make(map[string]json.RawMessage, 0), + Data: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/inferenceaggregation.go b/typedapi/types/inferenceaggregation.go index 62c6bbca97..e05d2b992e 100644 --- a/typedapi/types/inferenceaggregation.go +++ b/typedapi/types/inferenceaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // InferenceAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L205-L214 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L225-L234 type InferenceAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -107,3 +107,13 @@ func NewInferenceAggregation() *InferenceAggregation { return r } + +// true + +type InferenceAggregationVariant interface { + InferenceAggregationCaster() *InferenceAggregation +} + +func (s *InferenceAggregation) InferenceAggregationCaster() *InferenceAggregation { + return s +} diff --git a/typedapi/types/inferencechunkingsettings.go b/typedapi/types/inferencechunkingsettings.go new file mode 100644 index 0000000000..abcc6dead6 --- /dev/null +++ b/typedapi/types/inferencechunkingsettings.go @@ -0,0 +1,182 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InferenceChunkingSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/_types/Services.ts#L60-L89 +type InferenceChunkingSettings struct { + // ChunkingSettings Chunking configuration object + ChunkingSettings *InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // MaxChunkSize The maximum size of a chunk in words. + // This value cannot be higher than `300` or lower than `20` (for `sentence` + // strategy) or `10` (for `word` strategy). + MaxChunkSize *int `json:"max_chunk_size,omitempty"` + // Overlap The number of overlapping words for chunks. + // It is applicable only to a `word` chunking strategy. + // This value cannot be higher than half the `max_chunk_size` value. + Overlap *int `json:"overlap,omitempty"` + // SentenceOverlap The number of overlapping sentences for chunks. + // It is applicable only for a `sentence` chunking strategy. + // It can be either `1` or `0`. 
+ SentenceOverlap *int `json:"sentence_overlap,omitempty"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // Strategy The chunking strategy: `sentence` or `word`. + Strategy *string `json:"strategy,omitempty"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +func (s *InferenceChunkingSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "chunking_settings": + if err := dec.Decode(&s.ChunkingSettings); err != nil { + return fmt.Errorf("%s | %w", "ChunkingSettings", err) + } + + case "max_chunk_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxChunkSize", err) + } + s.MaxChunkSize = &value + case float64: + f := int(v) + s.MaxChunkSize = &f + } + + case "overlap": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Overlap", err) + } + s.Overlap = &value + case float64: + f := int(v) + s.Overlap = &f + } + + case "sentence_overlap": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SentenceOverlap", err) + } + s.SentenceOverlap = &value + case float64: + f := int(v) + s.SentenceOverlap = &f + } + + case "service": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Service", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Service = o + + 
case "service_settings": + if err := dec.Decode(&s.ServiceSettings); err != nil { + return fmt.Errorf("%s | %w", "ServiceSettings", err) + } + + case "strategy": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Strategy", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Strategy = &o + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + } + } + return nil +} + +// NewInferenceChunkingSettings returns a InferenceChunkingSettings. +func NewInferenceChunkingSettings() *InferenceChunkingSettings { + r := &InferenceChunkingSettings{} + + return r +} + +// true + +type InferenceChunkingSettingsVariant interface { + InferenceChunkingSettingsCaster() *InferenceChunkingSettings +} + +func (s *InferenceChunkingSettings) InferenceChunkingSettingsCaster() *InferenceChunkingSettings { + return s +} diff --git a/typedapi/types/inferenceclassimportance.go b/typedapi/types/inferenceclassimportance.go index b50219e807..a74db45cbd 100644 --- a/typedapi/types/inferenceclassimportance.go +++ b/typedapi/types/inferenceclassimportance.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // InferenceClassImportance type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L691-L694 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L784-L787 type InferenceClassImportance struct { ClassName string `json:"class_name"` Importance Float64 `json:"importance"` @@ -91,3 +91,5 @@ func NewInferenceClassImportance() *InferenceClassImportance { return r } + +// false diff --git a/typedapi/types/inferenceconfig.go b/typedapi/types/inferenceconfig.go index ce45c20d9d..91809fff10 100644 --- a/typedapi/types/inferenceconfig.go +++ b/typedapi/types/inferenceconfig.go @@ -16,23 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // InferenceConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L746-L758 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1048-L1060 type InferenceConfig struct { + AdditionalInferenceConfigProperty map[string]json.RawMessage `json:"-"` // Classification Classification configuration for inference. Classification *InferenceConfigClassification `json:"classification,omitempty"` // Regression Regression configuration for inference. 
Regression *InferenceConfigRegression `json:"regression,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s InferenceConfig) MarshalJSON() ([]byte, error) { + type opt InferenceConfig + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalInferenceConfigProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalInferenceConfigProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewInferenceConfig returns a InferenceConfig. func NewInferenceConfig() *InferenceConfig { - r := &InferenceConfig{} + r := &InferenceConfig{ + AdditionalInferenceConfigProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type InferenceConfigVariant interface { + InferenceConfigCaster() *InferenceConfig +} + +func (s *InferenceConfig) InferenceConfigCaster() *InferenceConfig { + return s +} diff --git a/typedapi/types/inferenceconfigclassification.go b/typedapi/types/inferenceconfigclassification.go index fae6f02edc..6795e0b024 100644 --- a/typedapi/types/inferenceconfigclassification.go +++ b/typedapi/types/inferenceconfigclassification.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // InferenceConfigClassification type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L773-L799 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1075-L1101 type InferenceConfigClassification struct { // NumTopClasses Specifies the number of top class predictions to return. NumTopClasses *int `json:"num_top_classes,omitempty"` @@ -127,3 +127,13 @@ func NewInferenceConfigClassification() *InferenceConfigClassification { return r } + +// true + +type InferenceConfigClassificationVariant interface { + InferenceConfigClassificationCaster() *InferenceConfigClassification +} + +func (s *InferenceConfigClassification) InferenceConfigClassificationCaster() *InferenceConfigClassification { + return s +} diff --git a/typedapi/types/inferenceconfigcontainer.go b/typedapi/types/inferenceconfigcontainer.go index e2cec8b751..ec885cd40c 100644 --- a/typedapi/types/inferenceconfigcontainer.go +++ b/typedapi/types/inferenceconfigcontainer.go @@ -16,23 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // InferenceConfigContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L216-L222 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L236-L242 type InferenceConfigContainer struct { + AdditionalInferenceConfigContainerProperty map[string]json.RawMessage `json:"-"` // Classification Classification configuration for inference. Classification *ClassificationInferenceOptions `json:"classification,omitempty"` // Regression Regression configuration for inference. Regression *RegressionInferenceOptions `json:"regression,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s InferenceConfigContainer) MarshalJSON() ([]byte, error) { + type opt InferenceConfigContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalInferenceConfigContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalInferenceConfigContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewInferenceConfigContainer returns a InferenceConfigContainer. 
func NewInferenceConfigContainer() *InferenceConfigContainer { - r := &InferenceConfigContainer{} + r := &InferenceConfigContainer{ + AdditionalInferenceConfigContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type InferenceConfigContainerVariant interface { + InferenceConfigContainerCaster() *InferenceConfigContainer +} + +func (s *InferenceConfigContainer) InferenceConfigContainerCaster() *InferenceConfigContainer { + return s +} diff --git a/typedapi/types/inferenceconfigcreatecontainer.go b/typedapi/types/inferenceconfigcreatecontainer.go index fde9799f76..d7c641367c 100644 --- a/typedapi/types/inferenceconfigcreatecontainer.go +++ b/typedapi/types/inferenceconfigcreatecontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // InferenceConfigCreateContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L23-L80 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L23-L80 type InferenceConfigCreateContainer struct { + AdditionalInferenceConfigCreateContainerProperty map[string]json.RawMessage `json:"-"` // Classification Classification configuration for inference. Classification *ClassificationInferenceOptions `json:"classification,omitempty"` // FillMask Fill mask configuration for inference. 
@@ -46,9 +52,50 @@ type InferenceConfigCreateContainer struct { ZeroShotClassification *ZeroShotClassificationInferenceOptions `json:"zero_shot_classification,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s InferenceConfigCreateContainer) MarshalJSON() ([]byte, error) { + type opt InferenceConfigCreateContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalInferenceConfigCreateContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalInferenceConfigCreateContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewInferenceConfigCreateContainer returns a InferenceConfigCreateContainer. func NewInferenceConfigCreateContainer() *InferenceConfigCreateContainer { - r := &InferenceConfigCreateContainer{} + r := &InferenceConfigCreateContainer{ + AdditionalInferenceConfigCreateContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type InferenceConfigCreateContainerVariant interface { + InferenceConfigCreateContainerCaster() *InferenceConfigCreateContainer +} + +func (s *InferenceConfigCreateContainer) InferenceConfigCreateContainerCaster() *InferenceConfigCreateContainer { + return s +} diff --git a/typedapi/types/inferenceconfigregression.go b/typedapi/types/inferenceconfigregression.go index 928f2ad59a..78e530ecbb 100644 --- a/typedapi/types/inferenceconfigregression.go +++ b/typedapi/types/inferenceconfigregression.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // InferenceConfigRegression type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L760-L771 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1062-L1073 type InferenceConfigRegression struct { // NumTopFeatureImportanceValues Specifies the maximum number of feature importance values per document. NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` @@ -87,3 +87,13 @@ func NewInferenceConfigRegression() *InferenceConfigRegression { return r } + +// true + +type InferenceConfigRegressionVariant interface { + InferenceConfigRegressionCaster() *InferenceConfigRegression +} + +func (s *InferenceConfigRegression) InferenceConfigRegressionCaster() *InferenceConfigRegression { + return s +} diff --git a/typedapi/types/inferenceconfigupdatecontainer.go b/typedapi/types/inferenceconfigupdatecontainer.go index 915b763303..6ec4c8e515 100644 --- a/typedapi/types/inferenceconfigupdatecontainer.go +++ b/typedapi/types/inferenceconfigupdatecontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // InferenceConfigUpdateContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L296-L318 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L284-L306 type InferenceConfigUpdateContainer struct { + AdditionalInferenceConfigUpdateContainerProperty map[string]json.RawMessage `json:"-"` // Classification Classification configuration for inference. Classification *ClassificationInferenceOptions `json:"classification,omitempty"` // FillMask Fill mask configuration for inference. @@ -46,9 +52,50 @@ type InferenceConfigUpdateContainer struct { ZeroShotClassification *ZeroShotClassificationInferenceUpdateOptions `json:"zero_shot_classification,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s InferenceConfigUpdateContainer) MarshalJSON() ([]byte, error) { + type opt InferenceConfigUpdateContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalInferenceConfigUpdateContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalInferenceConfigUpdateContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewInferenceConfigUpdateContainer returns a InferenceConfigUpdateContainer. 
func NewInferenceConfigUpdateContainer() *InferenceConfigUpdateContainer { - r := &InferenceConfigUpdateContainer{} + r := &InferenceConfigUpdateContainer{ + AdditionalInferenceConfigUpdateContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type InferenceConfigUpdateContainerVariant interface { + InferenceConfigUpdateContainerCaster() *InferenceConfigUpdateContainer +} + +func (s *InferenceConfigUpdateContainer) InferenceConfigUpdateContainerCaster() *InferenceConfigUpdateContainer { + return s +} diff --git a/typedapi/types/inferenceendpoint.go b/typedapi/types/inferenceendpoint.go index 72a6ad3fa7..d7dcd7ff09 100644 --- a/typedapi/types/inferenceendpoint.go +++ b/typedapi/types/inferenceendpoint.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,14 +31,16 @@ import ( // InferenceEndpoint type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/_types/Services.ts#L23-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/_types/Services.ts#L24-L44 type InferenceEndpoint struct { + // ChunkingSettings Chunking configuration object + ChunkingSettings *InferenceChunkingSettings `json:"chunking_settings,omitempty"` // Service The service type Service string `json:"service"` // ServiceSettings Settings specific to the service ServiceSettings json.RawMessage `json:"service_settings"` // TaskSettings Task settings specific to the service and task type - TaskSettings json.RawMessage `json:"task_settings"` + TaskSettings json.RawMessage `json:"task_settings,omitempty"` } func (s *InferenceEndpoint) UnmarshalJSON(data []byte) error { @@ -56,6 +58,11 @@ func (s *InferenceEndpoint) UnmarshalJSON(data []byte) error { switch t { + case "chunking_settings": + if err := dec.Decode(&s.ChunkingSettings); err != nil { + return fmt.Errorf("%s | %w", "ChunkingSettings", err) + } + case "service": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -89,3 +96,13 @@ func NewInferenceEndpoint() *InferenceEndpoint { return r } + +// true + +type InferenceEndpointVariant interface { + InferenceEndpointCaster() *InferenceEndpoint +} + +func (s *InferenceEndpoint) InferenceEndpointCaster() *InferenceEndpoint { + return s +} diff --git a/typedapi/types/inferenceendpointinfo.go b/typedapi/types/inferenceendpointinfo.go index bb4f99415e..449c882130 100644 --- a/typedapi/types/inferenceendpointinfo.go +++ b/typedapi/types/inferenceendpointinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,8 +33,10 @@ import ( // InferenceEndpointInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/_types/Services.ts#L41-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/_types/Services.ts#L46-L58 type InferenceEndpointInfo struct { + // ChunkingSettings Chunking configuration object + ChunkingSettings *InferenceChunkingSettings `json:"chunking_settings,omitempty"` // InferenceId The inference Id InferenceId string `json:"inference_id"` // Service The service type @@ -42,7 +44,7 @@ type InferenceEndpointInfo struct { // ServiceSettings Settings specific to the service ServiceSettings json.RawMessage `json:"service_settings"` // TaskSettings Task settings specific to the service and task type - TaskSettings json.RawMessage `json:"task_settings"` + TaskSettings json.RawMessage `json:"task_settings,omitempty"` // TaskType The task type TaskType tasktype.TaskType `json:"task_type"` } @@ -62,6 +64,11 @@ func (s *InferenceEndpointInfo) UnmarshalJSON(data []byte) error { switch t { + case "chunking_settings": + if err := dec.Decode(&s.ChunkingSettings); err != nil { + return fmt.Errorf("%s | %w", "ChunkingSettings", err) + } + case "inference_id": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -112,3 +119,5 @@ func NewInferenceEndpointInfo() *InferenceEndpointInfo { return r } + +// false diff --git a/typedapi/types/inferencefeatureimportance.go b/typedapi/types/inferencefeatureimportance.go index 9ffcb95391..54407b3815 100644 --- a/typedapi/types/inferencefeatureimportance.go +++ b/typedapi/types/inferencefeatureimportance.go @@ -16,7 +16,7 @@ // under the 
License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // InferenceFeatureImportance type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L685-L689 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L778-L782 type InferenceFeatureImportance struct { Classes []InferenceClassImportance `json:"classes,omitempty"` FeatureName string `json:"feature_name"` @@ -97,3 +97,5 @@ func NewInferenceFeatureImportance() *InferenceFeatureImportance { return r } + +// false diff --git a/typedapi/types/inferenceprocessor.go b/typedapi/types/inferenceprocessor.go index bc14b87b2c..dbceb5f087 100644 --- a/typedapi/types/inferenceprocessor.go +++ b/typedapi/types/inferenceprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // InferenceProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L725-L744 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1027-L1046 type InferenceProcessor struct { // Description Description of the processor. 
// Useful for describing the purpose of the processor or its configuration. @@ -158,8 +158,18 @@ func (s *InferenceProcessor) UnmarshalJSON(data []byte) error { // NewInferenceProcessor returns a InferenceProcessor. func NewInferenceProcessor() *InferenceProcessor { r := &InferenceProcessor{ - FieldMap: make(map[string]json.RawMessage, 0), + FieldMap: make(map[string]json.RawMessage), } return r } + +// true + +type InferenceProcessorVariant interface { + InferenceProcessorCaster() *InferenceProcessor +} + +func (s *InferenceProcessor) InferenceProcessorCaster() *InferenceProcessor { + return s +} diff --git a/typedapi/types/inferenceresponseresult.go b/typedapi/types/inferenceresponseresult.go index 1199efd5ba..ba0f2bbf87 100644 --- a/typedapi/types/inferenceresponseresult.go +++ b/typedapi/types/inferenceresponseresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // InferenceResponseResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L459-L506 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L447-L495 type InferenceResponseResult struct { // Entities If the model is trained for named entity recognition (NER) tasks, the // response contains the recognized entities. 
@@ -55,7 +55,7 @@ type InferenceResponseResult struct { // For regression models, its a numerical value // For classification models, it may be an integer, double, boolean or string // depending on prediction type - PredictedValue []PredictedValue `json:"predicted_value,omitempty"` + PredictedValue [][]ScalarValue `json:"predicted_value,omitempty"` // PredictedValueSequence For fill mask tasks, the response contains the input text sequence with the // mask token replaced by the predicted // value. @@ -116,7 +116,7 @@ func (s *InferenceResponseResult) UnmarshalJSON(data []byte) error { rawMsg := json.RawMessage{} dec.Decode(&rawMsg) if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(PredictedValue) + o := new([]ScalarValue) if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { return fmt.Errorf("%s | %w", "PredictedValue", err) } @@ -200,3 +200,5 @@ func NewInferenceResponseResult() *InferenceResponseResult { return r } + +// false diff --git a/typedapi/types/inferencetopclassentry.go b/typedapi/types/inferencetopclassentry.go index 93de8a7ae9..e7c9c59ae7 100644 --- a/typedapi/types/inferencetopclassentry.go +++ b/typedapi/types/inferencetopclassentry.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // InferenceTopClassEntry type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L679-L683 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L772-L776 type InferenceTopClassEntry struct { ClassName FieldValue `json:"class_name"` ClassProbability Float64 `json:"class_probability"` @@ -101,3 +101,5 @@ func NewInferenceTopClassEntry() *InferenceTopClassEntry { return r } + +// false diff --git a/typedapi/types/influence.go b/typedapi/types/influence.go index aed3d1d1fd..4dc2806180 100644 --- a/typedapi/types/influence.go +++ b/typedapi/types/influence.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Influence type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Anomaly.ts#L140-L143 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Anomaly.ts#L141-L144 type Influence struct { InfluencerFieldName string `json:"influencer_field_name"` InfluencerFieldValues []string `json:"influencer_field_values"` @@ -80,3 +80,5 @@ func NewInfluence() *Influence { return r } + +// false diff --git a/typedapi/types/influencer.go b/typedapi/types/influencer.go index 714982bde1..ca4a9979ec 100644 --- a/typedapi/types/influencer.go +++ b/typedapi/types/influencer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Influencer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Influencer.ts#L31-L83 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Influencer.ts#L24-L76 type Influencer struct { // BucketSpan The length of the bucket in seconds. This value matches the bucket span that // is specified in the job. @@ -218,3 +218,5 @@ func NewInfluencer() *Influencer { return r } + +// false diff --git a/typedapi/types/infofeaturestate.go b/typedapi/types/infofeaturestate.go index 0195d54d9d..6d836ca5ae 100644 --- a/typedapi/types/infofeaturestate.go +++ b/typedapi/types/infofeaturestate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // InfoFeatureState type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotInfoFeatureState.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotInfoFeatureState.ts#L22-L25 type InfoFeatureState struct { FeatureName string `json:"feature_name"` Indices []string `json:"indices"` @@ -91,3 +91,5 @@ func NewInfoFeatureState() *InfoFeatureState { return r } + +// false diff --git a/typedapi/types/simulateingest.go b/typedapi/types/ingest.go similarity index 71% rename from typedapi/types/simulateingest.go rename to typedapi/types/ingest.go index bf4685cdb0..797ab84566 100644 --- a/typedapi/types/simulateingest.go +++ b/typedapi/types/ingest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,15 +28,16 @@ import ( "io" ) -// SimulateIngest type. +// Ingest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/simulate/types.ts#L28-L31 -type SimulateIngest struct { +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Simulation.ts#L29-L37 +type Ingest struct { Pipeline *string `json:"pipeline,omitempty"` + Redact_ *Redact `json:"_redact,omitempty"` Timestamp DateTime `json:"timestamp"` } -func (s *SimulateIngest) UnmarshalJSON(data []byte) error { +func (s *Ingest) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -56,6 +57,11 @@ func (s *SimulateIngest) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Pipeline", err) } + case "_redact": + if err := dec.Decode(&s.Redact_); err != nil { + return fmt.Errorf("%s | %w", "Redact_", err) + } + case "timestamp": if err := dec.Decode(&s.Timestamp); err != nil { return fmt.Errorf("%s | %w", "Timestamp", err) @@ -66,9 +72,11 @@ func (s *SimulateIngest) UnmarshalJSON(data []byte) error { return nil } -// NewSimulateIngest returns a SimulateIngest. -func NewSimulateIngest() *SimulateIngest { - r := &SimulateIngest{} +// NewIngest returns a Ingest. +func NewIngest() *Ingest { + r := &Ingest{} return r } + +// false diff --git a/typedapi/types/ingestdocumentsimulation.go b/typedapi/types/ingestdocumentsimulation.go new file mode 100644 index 0000000000..5d15f30df6 --- /dev/null +++ b/typedapi/types/ingestdocumentsimulation.go @@ -0,0 +1,170 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IngestDocumentSimulation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/simulate/ingest/SimulateIngestResponse.ts#L35-L78 +type IngestDocumentSimulation struct { + // Error Any error resulting from simulatng ingest on this doc. This can be an error + // generated by + // executing a processor, or a mapping validation error when simulating indexing + // the resulting + // doc. + Error *ErrorCause `json:"error,omitempty"` + // ExecutedPipelines A list of the names of the pipelines executed on this document. + ExecutedPipelines []string `json:"executed_pipelines"` + // Id_ Identifier for the document. + Id_ string `json:"_id"` + // IgnoredFields A list of the fields that would be ignored at the indexing step. For example, + // a field whose + // value is larger than the allowed limit would make it through all of the + // pipelines, but + // would not be indexed into Elasticsearch. + IgnoredFields []map[string]string `json:"ignored_fields,omitempty"` + // Index_ Name of the index that the document would be indexed into if this were not a + // simulation. + Index_ string `json:"_index"` + IngestDocumentSimulation map[string]string `json:"-"` + // Source_ JSON body for the document. 
+ Source_ map[string]json.RawMessage `json:"_source"` + Version_ StringifiedVersionNumber `json:"_version"` +} + +func (s *IngestDocumentSimulation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + + case "executed_pipelines": + if err := dec.Decode(&s.ExecutedPipelines); err != nil { + return fmt.Errorf("%s | %w", "ExecutedPipelines", err) + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "ignored_fields": + if err := dec.Decode(&s.IgnoredFields); err != nil { + return fmt.Errorf("%s | %w", "IgnoredFields", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "_source": + if s.Source_ == nil { + s.Source_ = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + + case "_version": + if err := dec.Decode(&s.Version_); err != nil { + return fmt.Errorf("%s | %w", "Version_", err) + } + + default: + + if key, ok := t.(string); ok { + if s.IngestDocumentSimulation == nil { + s.IngestDocumentSimulation = make(map[string]string, 0) + } + raw := new(string) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "IngestDocumentSimulation", err) + } + s.IngestDocumentSimulation[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s IngestDocumentSimulation) MarshalJSON() ([]byte, error) { + type opt IngestDocumentSimulation + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) 
+ if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.IngestDocumentSimulation { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "IngestDocumentSimulation") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewIngestDocumentSimulation returns a IngestDocumentSimulation. +func NewIngestDocumentSimulation() *IngestDocumentSimulation { + r := &IngestDocumentSimulation{ + IngestDocumentSimulation: make(map[string]string), + Source_: make(map[string]json.RawMessage), + } + + return r +} + +// false diff --git a/typedapi/types/ingestpipeline.go b/typedapi/types/ingestpipeline.go index bf4340720c..6a92610125 100644 --- a/typedapi/types/ingestpipeline.go +++ b/typedapi/types/ingestpipeline.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,8 +31,13 @@ import ( // IngestPipeline type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Pipeline.ts#L23-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Pipeline.ts#L23-L51 type IngestPipeline struct { + // Deprecated Marks this ingest pipeline as deprecated. + // When a deprecated ingest pipeline is referenced as the default or final + // pipeline when creating or updating a non-deprecated index template, + // Elasticsearch will emit a deprecation warning. + Deprecated *bool `json:"deprecated,omitempty"` // Description Description of the ingest pipeline. 
Description *string `json:"description,omitempty"` // Meta_ Arbitrary metadata about the ingest pipeline. This map is not automatically @@ -62,6 +67,20 @@ func (s *IngestPipeline) UnmarshalJSON(data []byte) error { switch t { + case "deprecated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deprecated", err) + } + s.Deprecated = &value + case bool: + s.Deprecated = &v + } + case "description": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -105,3 +124,13 @@ func NewIngestPipeline() *IngestPipeline { return r } + +// true + +type IngestPipelineVariant interface { + IngestPipelineCaster() *IngestPipeline +} + +func (s *IngestPipeline) IngestPipelineCaster() *IngestPipeline { + return s +} diff --git a/typedapi/types/ingestpipelineparams.go b/typedapi/types/ingestpipelineparams.go index 886e5ac8f0..3c7f0a0279 100644 --- a/typedapi/types/ingestpipelineparams.go +++ b/typedapi/types/ingestpipelineparams.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IngestPipelineParams type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L148-L153 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L148-L153 type IngestPipelineParams struct { ExtractBinaryContent bool `json:"extract_binary_content"` Name string `json:"name"` @@ -119,3 +119,13 @@ func NewIngestPipelineParams() *IngestPipelineParams { return r } + +// true + +type IngestPipelineParamsVariant interface { + IngestPipelineParamsCaster() *IngestPipelineParams +} + +func (s *IngestPipelineParams) IngestPipelineParamsCaster() *IngestPipelineParams { + return s +} diff --git a/typedapi/types/ingeststats.go b/typedapi/types/ingeststats.go new file mode 100644 index 0000000000..991b7b87f0 --- /dev/null +++ b/typedapi/types/ingeststats.go @@ -0,0 +1,177 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IngestStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L356-L394 +type IngestStats struct { + // Count Total number of documents ingested during the lifetime of this node. + Count int64 `json:"count"` + // Current Total number of documents currently being ingested. + Current int64 `json:"current"` + // Failed Total number of failed ingest operations during the lifetime of this node. + Failed int64 `json:"failed"` + // IngestedAsFirstPipelineInBytes Total number of bytes of all documents ingested by the pipeline. + // This field is only present on pipelines which are the first to process a + // document. + // Thus, it is not present on pipelines which only serve as a final pipeline + // after a default pipeline, a pipeline run after a reroute processor, or + // pipelines in pipeline processors. + IngestedAsFirstPipelineInBytes int64 `json:"ingested_as_first_pipeline_in_bytes"` + // Processors Total number of ingest processors. + Processors []map[string]KeyedProcessor `json:"processors"` + // ProducedAsFirstPipelineInBytes Total number of bytes of all documents produced by the pipeline. + // This field is only present on pipelines which are the first to process a + // document. + // Thus, it is not present on pipelines which only serve as a final pipeline + // after a default pipeline, a pipeline run after a reroute processor, or + // pipelines in pipeline processors. + // In situations where there are subsequent pipelines, the value represents the + // size of the document after all pipelines have run. 
+ ProducedAsFirstPipelineInBytes int64 `json:"produced_as_first_pipeline_in_bytes"` + // TimeInMillis Total time, in milliseconds, spent preprocessing ingest documents during the + // lifetime of this node. + TimeInMillis int64 `json:"time_in_millis"` +} + +func (s *IngestStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "current": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Current", err) + } + s.Current = value + case float64: + f := int64(v) + s.Current = f + } + + case "failed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Failed", err) + } + s.Failed = value + case float64: + f := int64(v) + s.Failed = f + } + + case "ingested_as_first_pipeline_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IngestedAsFirstPipelineInBytes", err) + } + s.IngestedAsFirstPipelineInBytes = value + case float64: + f := int64(v) + s.IngestedAsFirstPipelineInBytes = f + } + + case "processors": + if err := dec.Decode(&s.Processors); err != nil { + return fmt.Errorf("%s | %w", "Processors", err) + } + + case "produced_as_first_pipeline_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if 
err != nil { + return fmt.Errorf("%s | %w", "ProducedAsFirstPipelineInBytes", err) + } + s.ProducedAsFirstPipelineInBytes = value + case float64: + f := int64(v) + s.ProducedAsFirstPipelineInBytes = f + } + + case "time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TimeInMillis", err) + } + + } + } + return nil +} + +// NewIngestStats returns a IngestStats. +func NewIngestStats() *IngestStats { + r := &IngestStats{} + + return r +} + +// false diff --git a/typedapi/types/ingesttotal.go b/typedapi/types/ingesttotal.go index 2f58202ebf..519c72fc1b 100644 --- a/typedapi/types/ingesttotal.go +++ b/typedapi/types/ingesttotal.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,19 +31,17 @@ import ( // IngestTotal type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L356-L377 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L396-L413 type IngestTotal struct { // Count Total number of documents ingested during the lifetime of this node. - Count *int64 `json:"count,omitempty"` + Count int64 `json:"count"` // Current Total number of documents currently being ingested. - Current *int64 `json:"current,omitempty"` + Current int64 `json:"current"` // Failed Total number of failed ingest operations during the lifetime of this node. - Failed *int64 `json:"failed,omitempty"` - // Processors Total number of ingest processors. 
- Processors []map[string]KeyedProcessor `json:"processors,omitempty"` + Failed int64 `json:"failed"` // TimeInMillis Total time, in milliseconds, spent preprocessing ingest documents during the // lifetime of this node. - TimeInMillis *int64 `json:"time_in_millis,omitempty"` + TimeInMillis int64 `json:"time_in_millis"` } func (s *IngestTotal) UnmarshalJSON(data []byte) error { @@ -70,10 +68,10 @@ func (s *IngestTotal) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "Count", err) } - s.Count = &value + s.Count = value case float64: f := int64(v) - s.Count = &f + s.Count = f } case "current": @@ -85,10 +83,10 @@ func (s *IngestTotal) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "Current", err) } - s.Current = &value + s.Current = value case float64: f := int64(v) - s.Current = &f + s.Current = f } case "failed": @@ -100,15 +98,10 @@ func (s *IngestTotal) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "Failed", err) } - s.Failed = &value + s.Failed = value case float64: f := int64(v) - s.Failed = &f - } - - case "processors": - if err := dec.Decode(&s.Processors); err != nil { - return fmt.Errorf("%s | %w", "Processors", err) + s.Failed = f } case "time_in_millis": @@ -127,3 +120,5 @@ func NewIngestTotal() *IngestTotal { return r } + +// false diff --git a/typedapi/types/inlineget.go b/typedapi/types/inlineget.go index ab129eae16..2cd81ac2fa 100644 --- a/typedapi/types/inlineget.go +++ b/typedapi/types/inlineget.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // InlineGet type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L320-L333 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L324-L337 type InlineGet struct { Fields map[string]json.RawMessage `json:"fields,omitempty"` Found bool `json:"found"` @@ -159,9 +159,11 @@ func (s InlineGet) MarshalJSON() ([]byte, error) { // NewInlineGet returns a InlineGet. func NewInlineGet() *InlineGet { r := &InlineGet{ - Fields: make(map[string]json.RawMessage, 0), - Metadata: make(map[string]json.RawMessage, 0), + Fields: make(map[string]json.RawMessage), + Metadata: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/inlinegetdictuserdefined.go b/typedapi/types/inlinegetdictuserdefined.go index 45ba972dad..e51f405138 100644 --- a/typedapi/types/inlinegetdictuserdefined.go +++ b/typedapi/types/inlinegetdictuserdefined.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // InlineGetDictUserDefined type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L320-L333 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L324-L337 type InlineGetDictUserDefined struct { Fields map[string]json.RawMessage `json:"fields,omitempty"` Found bool `json:"found"` @@ -162,10 +162,12 @@ func (s InlineGetDictUserDefined) MarshalJSON() ([]byte, error) { // NewInlineGetDictUserDefined returns a InlineGetDictUserDefined. 
func NewInlineGetDictUserDefined() *InlineGetDictUserDefined { r := &InlineGetDictUserDefined{ - Fields: make(map[string]json.RawMessage, 0), - InlineGetDictUserDefined: make(map[string]json.RawMessage, 0), - Source_: make(map[string]json.RawMessage, 0), + Fields: make(map[string]json.RawMessage), + InlineGetDictUserDefined: make(map[string]json.RawMessage), + Source_: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/innerhits.go b/typedapi/types/innerhits.go index 43dd635398..fcb38be5dc 100644 --- a/typedapi/types/innerhits.go +++ b/typedapi/types/innerhits.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // InnerHits type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/hits.ts#L107-L141 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/hits.ts#L108-L142 type InnerHits struct { Collapse *FieldCollapse `json:"collapse,omitempty"` DocvalueFields []FieldAndFormat `json:"docvalue_fields,omitempty"` @@ -294,8 +294,18 @@ func (s *InnerHits) UnmarshalJSON(data []byte) error { // NewInnerHits returns a InnerHits. 
func NewInnerHits() *InnerHits { r := &InnerHits{ - ScriptFields: make(map[string]ScriptField, 0), + ScriptFields: make(map[string]ScriptField), } return r } + +// true + +type InnerHitsVariant interface { + InnerHitsCaster() *InnerHits +} + +func (s *InnerHits) InnerHitsCaster() *InnerHits { + return s +} diff --git a/typedapi/types/innerhitsresult.go b/typedapi/types/innerhitsresult.go index f726ad4bbb..348e3bc315 100644 --- a/typedapi/types/innerhitsresult.go +++ b/typedapi/types/innerhitsresult.go @@ -16,15 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // InnerHitsResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/hits.ts#L85-L87 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/hits.ts#L86-L88 type InnerHitsResult struct { - Hits *HitsMetadata `json:"hits,omitempty"` + Hits HitsMetadata `json:"hits"` } // NewInnerHitsResult returns a InnerHitsResult. @@ -33,3 +33,5 @@ func NewInnerHitsResult() *InnerHitsResult { return r } + +// false diff --git a/typedapi/types/inprogress.go b/typedapi/types/inprogress.go index 8860625fd9..520dd02dae 100644 --- a/typedapi/types/inprogress.go +++ b/typedapi/types/inprogress.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // InProgress type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/_types/SnapshotLifecycle.ts#L131-L136 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/_types/SnapshotLifecycle.ts#L141-L146 type InProgress struct { Name string `json:"name"` StartTimeMillis int64 `json:"start_time_millis"` @@ -92,3 +92,5 @@ func NewInProgress() *InProgress { return r } + +// false diff --git a/typedapi/types/input.go b/typedapi/types/input.go index b50d58ad13..794deb1a75 100644 --- a/typedapi/types/input.go +++ b/typedapi/types/input.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // Input type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/types.ts#L56-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/types.ts#L56-L58 type Input struct { FieldNames []string `json:"field_names"` } @@ -77,3 +77,13 @@ func NewInput() *Input { return r } + +// true + +type InputVariant interface { + InputCaster() *Input +} + +func (s *Input) InputCaster() *Input { + return s +} diff --git a/typedapi/types/integernumberproperty.go b/typedapi/types/integernumberproperty.go index 7a7dda6f80..07dcabf476 100644 --- a/typedapi/types/integernumberproperty.go +++ b/typedapi/types/integernumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // IntegerNumberProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L157-L160 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L161-L164 type IntegerNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,13 +48,13 @@ type IntegerNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - NullValue *int `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *int `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -162,301 +163,313 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if 
err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -554,301 +567,313 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := 
NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -859,18 +884,6 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Script", err) } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -885,6 +898,11 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -932,8 +950,8 @@ func (s IntegerNumberProperty) MarshalJSON() ([]byte, error) { OnScriptError: s.OnScriptError, Properties: s.Properties, Script: s.Script, - Similarity: s.Similarity, Store: s.Store, + 
SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -947,10 +965,20 @@ func (s IntegerNumberProperty) MarshalJSON() ([]byte, error) { // NewIntegerNumberProperty returns a IntegerNumberProperty. func NewIntegerNumberProperty() *IntegerNumberProperty { r := &IntegerNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type IntegerNumberPropertyVariant interface { + IntegerNumberPropertyCaster() *IntegerNumberProperty +} + +func (s *IntegerNumberProperty) IntegerNumberPropertyCaster() *IntegerNumberProperty { + return s +} diff --git a/typedapi/types/integerrangeproperty.go b/typedapi/types/integerrangeproperty.go index 73f0f25a6d..97234d2cbe 100644 --- a/typedapi/types/integerrangeproperty.go +++ b/typedapi/types/integerrangeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // IntegerRangeProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/range.ts#L42-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/range.ts#L42-L44 type IntegerRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -44,11 +45,11 @@ type IntegerRangeProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { @@ -150,301 +151,313 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo 
:= NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } 
s.Fields[key] = oo } @@ -507,318 +520,318 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | 
%w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := 
NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -833,6 +846,11 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -847,19 +865,19 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { func (s 
IntegerRangeProperty) MarshalJSON() ([]byte, error) { type innerIntegerRangeProperty IntegerRangeProperty tmp := innerIntegerRangeProperty{ - Boost: s.Boost, - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "integer_range" @@ -870,10 +888,20 @@ func (s IntegerRangeProperty) MarshalJSON() ([]byte, error) { // NewIntegerRangeProperty returns a IntegerRangeProperty. func NewIntegerRangeProperty() *IntegerRangeProperty { r := &IntegerRangeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type IntegerRangePropertyVariant interface { + IntegerRangePropertyCaster() *IntegerRangeProperty +} + +func (s *IntegerRangeProperty) IntegerRangePropertyCaster() *IntegerRangeProperty { + return s +} diff --git a/typedapi/types/intervals.go b/typedapi/types/intervals.go index e3c9b7e3d5..b8dc5102cb 100644 --- a/typedapi/types/intervals.go +++ b/typedapi/types/intervals.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // Intervals type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L83-L110 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L83-L110 type Intervals struct { + AdditionalIntervalsProperty map[string]json.RawMessage `json:"-"` // AllOf Returns matches that span a combination of other rules. AllOf *IntervalsAllOf `json:"all_of,omitempty"` // AnyOf Returns intervals produced by any of its sub-rules. @@ -38,9 +44,50 @@ type Intervals struct { Wildcard *IntervalsWildcard `json:"wildcard,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s Intervals) MarshalJSON() ([]byte, error) { + type opt Intervals + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIntervalsProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalIntervalsProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewIntervals returns a Intervals. 
func NewIntervals() *Intervals { - r := &Intervals{} + r := &Intervals{ + AdditionalIntervalsProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type IntervalsVariant interface { + IntervalsCaster() *Intervals +} + +func (s *Intervals) IntervalsCaster() *Intervals { + return s +} diff --git a/typedapi/types/intervalsallof.go b/typedapi/types/intervalsallof.go index 63260ce457..e5e164e3e3 100644 --- a/typedapi/types/intervalsallof.go +++ b/typedapi/types/intervalsallof.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IntervalsAllOf type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L50-L70 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L50-L70 type IntervalsAllOf struct { // Filter Rule used to filter returned intervals. Filter *IntervalsFilter `json:"filter,omitempty"` @@ -113,3 +113,13 @@ func NewIntervalsAllOf() *IntervalsAllOf { return r } + +// true + +type IntervalsAllOfVariant interface { + IntervalsAllOfCaster() *IntervalsAllOf +} + +func (s *IntervalsAllOf) IntervalsAllOfCaster() *IntervalsAllOf { + return s +} diff --git a/typedapi/types/intervalsanyof.go b/typedapi/types/intervalsanyof.go index 1eccc234ac..253c60e5a1 100644 --- a/typedapi/types/intervalsanyof.go +++ b/typedapi/types/intervalsanyof.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IntervalsAnyOf type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L72-L81 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L72-L81 type IntervalsAnyOf struct { // Filter Rule used to filter returned intervals. Filter *IntervalsFilter `json:"filter,omitempty"` @@ -36,3 +36,13 @@ func NewIntervalsAnyOf() *IntervalsAnyOf { return r } + +// true + +type IntervalsAnyOfVariant interface { + IntervalsAnyOfCaster() *IntervalsAnyOf +} + +func (s *IntervalsAnyOf) IntervalsAnyOfCaster() *IntervalsAnyOf { + return s +} diff --git a/typedapi/types/intervalsfilter.go b/typedapi/types/intervalsfilter.go index 9e34fb65d8..729d98392b 100644 --- a/typedapi/types/intervalsfilter.go +++ b/typedapi/types/intervalsfilter.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // IntervalsFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L112-L152 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L112-L152 type IntervalsFilter struct { + AdditionalIntervalsFilterProperty map[string]json.RawMessage `json:"-"` // After Query used to return intervals that follow an interval from the `filter` // rule. After *Intervals `json:"after,omitempty"` @@ -53,9 +59,50 @@ type IntervalsFilter struct { Script *Script `json:"script,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s IntervalsFilter) MarshalJSON() ([]byte, error) { + type opt IntervalsFilter + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIntervalsFilterProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalIntervalsFilterProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewIntervalsFilter returns a IntervalsFilter. 
func NewIntervalsFilter() *IntervalsFilter { - r := &IntervalsFilter{} + r := &IntervalsFilter{ + AdditionalIntervalsFilterProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type IntervalsFilterVariant interface { + IntervalsFilterCaster() *IntervalsFilter +} + +func (s *IntervalsFilter) IntervalsFilterCaster() *IntervalsFilter { + return s +} diff --git a/typedapi/types/intervalsfuzzy.go b/typedapi/types/intervalsfuzzy.go index 9fb94ecc5f..8d8d1a7827 100644 --- a/typedapi/types/intervalsfuzzy.go +++ b/typedapi/types/intervalsfuzzy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IntervalsFuzzy type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L154-L184 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L154-L184 type IntervalsFuzzy struct { // Analyzer Analyzer used to normalize the term. Analyzer *string `json:"analyzer,omitempty"` @@ -141,3 +141,13 @@ func NewIntervalsFuzzy() *IntervalsFuzzy { return r } + +// true + +type IntervalsFuzzyVariant interface { + IntervalsFuzzyCaster() *IntervalsFuzzy +} + +func (s *IntervalsFuzzy) IntervalsFuzzyCaster() *IntervalsFuzzy { + return s +} diff --git a/typedapi/types/intervalsmatch.go b/typedapi/types/intervalsmatch.go index 7cd9022802..fb07689ca9 100644 --- a/typedapi/types/intervalsmatch.go +++ b/typedapi/types/intervalsmatch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IntervalsMatch type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L186-L216 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L186-L216 type IntervalsMatch struct { // Analyzer Analyzer used to analyze terms in the query. Analyzer *string `json:"analyzer,omitempty"` @@ -141,3 +141,13 @@ func NewIntervalsMatch() *IntervalsMatch { return r } + +// true + +type IntervalsMatchVariant interface { + IntervalsMatchCaster() *IntervalsMatch +} + +func (s *IntervalsMatch) IntervalsMatchCaster() *IntervalsMatch { + return s +} diff --git a/typedapi/types/intervalsprefix.go b/typedapi/types/intervalsprefix.go index a4bb5c69c6..917ec53583 100644 --- a/typedapi/types/intervalsprefix.go +++ b/typedapi/types/intervalsprefix.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IntervalsPrefix type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L218-L233 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L218-L233 type IntervalsPrefix struct { // Analyzer Analyzer used to analyze the `prefix`. 
Analyzer *string `json:"analyzer,omitempty"` @@ -99,3 +99,13 @@ func NewIntervalsPrefix() *IntervalsPrefix { return r } + +// true + +type IntervalsPrefixVariant interface { + IntervalsPrefixCaster() *IntervalsPrefix +} + +func (s *IntervalsPrefix) IntervalsPrefixCaster() *IntervalsPrefix { + return s +} diff --git a/typedapi/types/intervalsquery.go b/typedapi/types/intervalsquery.go index 774ba298b7..6e63f72b2c 100644 --- a/typedapi/types/intervalsquery.go +++ b/typedapi/types/intervalsquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,8 +31,9 @@ import ( // IntervalsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L235-L263 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L235-L266 type IntervalsQuery struct { + AdditionalIntervalsQueryProperty map[string]json.RawMessage `json:"-"` // AllOf Returns matches that span a combination of other rules. AllOf *IntervalsAllOf `json:"all_of,omitempty"` // AnyOf Returns intervals produced by any of its sub-rules. 
@@ -128,14 +129,68 @@ func (s *IntervalsQuery) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Wildcard", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalIntervalsQueryProperty == nil { + s.AdditionalIntervalsQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalIntervalsQueryProperty", err) + } + s.AdditionalIntervalsQueryProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s IntervalsQuery) MarshalJSON() ([]byte, error) { + type opt IntervalsQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIntervalsQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalIntervalsQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewIntervalsQuery returns a IntervalsQuery. func NewIntervalsQuery() *IntervalsQuery { - r := &IntervalsQuery{} + r := &IntervalsQuery{ + AdditionalIntervalsQueryProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type IntervalsQueryVariant interface { + IntervalsQueryCaster() *IntervalsQuery +} + +func (s *IntervalsQuery) IntervalsQueryCaster() *IntervalsQuery { + return s +} diff --git a/typedapi/types/intervalswildcard.go b/typedapi/types/intervalswildcard.go index d3b8a09c19..ffdc80f67b 100644 --- a/typedapi/types/intervalswildcard.go +++ b/typedapi/types/intervalswildcard.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IntervalsWildcard type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L265-L280 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L268-L283 type IntervalsWildcard struct { // Analyzer Analyzer used to analyze the `pattern`. // Defaults to the top-level field's analyzer. @@ -100,3 +100,13 @@ func NewIntervalsWildcard() *IntervalsWildcard { return r } + +// true + +type IntervalsWildcardVariant interface { + IntervalsWildcardCaster() *IntervalsWildcard +} + +func (s *IntervalsWildcard) IntervalsWildcardCaster() *IntervalsWildcard { + return s +} diff --git a/typedapi/types/invertedindex.go b/typedapi/types/invertedindex.go index f64bd854ab..b20144d270 100644 --- a/typedapi/types/invertedindex.go +++ b/typedapi/types/invertedindex.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // InvertedIndex type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L68-L76 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L68-L76 type InvertedIndex struct { Offsets uint `json:"offsets"` Payloads uint `json:"payloads"` @@ -39,3 +39,5 @@ func NewInvertedIndex() *InvertedIndex { return r } + +// false diff --git a/typedapi/types/invocation.go b/typedapi/types/invocation.go index 2fbd299487..f75b3a0102 100644 --- a/typedapi/types/invocation.go +++ b/typedapi/types/invocation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // Invocation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/_types/SnapshotLifecycle.ts#L138-L141 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/_types/SnapshotLifecycle.ts#L148-L151 type Invocation struct { SnapshotName string `json:"snapshot_name"` Time DateTime `json:"time"` @@ -72,3 +72,5 @@ func NewInvocation() *Invocation { return r } + +// false diff --git a/typedapi/types/invocations.go b/typedapi/types/invocations.go index 1880af6979..b62e18b582 100644 --- a/typedapi/types/invocations.go +++ b/typedapi/types/invocations.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Invocations type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L44-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L41-L43 type Invocations struct { Total int64 `json:"total"` } @@ -77,3 +77,5 @@ func NewInvocations() *Invocations { return r } + +// false diff --git a/typedapi/types/iostatdevice.go b/typedapi/types/iostatdevice.go index 1a156fbcc2..91c413fc79 100644 --- a/typedapi/types/iostatdevice.go +++ b/typedapi/types/iostatdevice.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IoStatDevice type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L730-L755 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L801-L826 type IoStatDevice struct { // DeviceName The Linux device name. DeviceName *string `json:"device_name,omitempty"` @@ -165,3 +165,5 @@ func NewIoStatDevice() *IoStatDevice { return r } + +// false diff --git a/typedapi/types/iostats.go b/typedapi/types/iostats.go index 5f92faa6e4..6fe5daf507 100644 --- a/typedapi/types/iostats.go +++ b/typedapi/types/iostats.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // IoStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L718-L728 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L789-L799 type IoStats struct { // Devices Array of disk metrics for each device that is backing an Elasticsearch data // path. @@ -40,3 +40,5 @@ func NewIoStats() *IoStats { return r } + +// false diff --git a/typedapi/types/ipfilter.go b/typedapi/types/ipfilter.go index c2aa889385..192bcba181 100644 --- a/typedapi/types/ipfilter.go +++ b/typedapi/types/ipfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IpFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L167-L170 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L177-L180 type IpFilter struct { Http bool `json:"http"` Transport bool `json:"transport"` @@ -91,3 +91,5 @@ func NewIpFilter() *IpFilter { return r } + +// false diff --git a/typedapi/types/ipinfo.go b/typedapi/types/ipinfo.go new file mode 100644 index 0000000000..aff5db07dc --- /dev/null +++ b/typedapi/types/ipinfo.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +// Ipinfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Database.ts#L59-L59 +type Ipinfo struct { +} + +// NewIpinfo returns a Ipinfo. 
+func NewIpinfo() *Ipinfo { + r := &Ipinfo{} + + return r +} + +// true + +type IpinfoVariant interface { + IpinfoCaster() *Ipinfo +} + +func (s *Ipinfo) IpinfoCaster() *Ipinfo { + return s +} diff --git a/typedapi/types/iplocationdatabaseconfigurationmetadata.go b/typedapi/types/iplocationdatabaseconfigurationmetadata.go new file mode 100644 index 0000000000..319e7fcad0 --- /dev/null +++ b/typedapi/types/iplocationdatabaseconfigurationmetadata.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IpLocationDatabaseConfigurationMetadata type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/get_ip_location_database/GetIpLocationDatabaseResponse.ts#L28-L34 +type IpLocationDatabaseConfigurationMetadata struct { + Database DatabaseConfigurationFull `json:"database"` + Id string `json:"id"` + ModifiedDate *int64 `json:"modified_date,omitempty"` + ModifiedDateMillis *int64 `json:"modified_date_millis,omitempty"` + Version int64 `json:"version"` +} + +func (s *IpLocationDatabaseConfigurationMetadata) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "database": + if err := dec.Decode(&s.Database); err != nil { + return fmt.Errorf("%s | %w", "Database", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "modified_date": + if err := dec.Decode(&s.ModifiedDate); err != nil { + return fmt.Errorf("%s | %w", "ModifiedDate", err) + } + + case "modified_date_millis": + if err := dec.Decode(&s.ModifiedDateMillis); err != nil { + return fmt.Errorf("%s | %w", "ModifiedDateMillis", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewIpLocationDatabaseConfigurationMetadata returns a IpLocationDatabaseConfigurationMetadata. +func NewIpLocationDatabaseConfigurationMetadata() *IpLocationDatabaseConfigurationMetadata { + r := &IpLocationDatabaseConfigurationMetadata{} + + return r +} + +// false diff --git a/typedapi/types/iplocationprocessor.go b/typedapi/types/iplocationprocessor.go new file mode 100644 index 0000000000..2ab0562261 --- /dev/null +++ b/typedapi/types/iplocationprocessor.go @@ -0,0 +1,232 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IpLocationProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L478-L512 +type IpLocationProcessor struct { + // DatabaseFile The database filename referring to a database the module ships with + // (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom + // database in the ingest-geoip config directory. + DatabaseFile *string `json:"database_file,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // DownloadDatabaseOnPipelineCreation If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the + // missing database is downloaded when the pipeline is created. + // Else, the download is triggered by when the pipeline is used as the + // `default_pipeline` or `final_pipeline` in an index. 
+ DownloadDatabaseOnPipelineCreation *bool `json:"download_database_on_pipeline_creation,omitempty"` + // Field The field to get the ip address from for the geographical lookup. + Field string `json:"field"` + // FirstOnly If `true`, only the first found IP location data will be returned, even if + // the field contains an array. + FirstOnly *bool `json:"first_only,omitempty"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Properties Controls what properties are added to the `target_field` based on the IP + // location lookup. + Properties []string `json:"properties,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field that will hold the geographical information looked up from the + // MaxMind database. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *IpLocationProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "database_file": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DatabaseFile", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DatabaseFile = &o + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "download_database_on_pipeline_creation": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DownloadDatabaseOnPipelineCreation", err) + } + s.DownloadDatabaseOnPipelineCreation = &value + case bool: + s.DownloadDatabaseOnPipelineCreation = &v + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "first_only": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FirstOnly", err) + } + s.FirstOnly = &value + case bool: + s.FirstOnly = &v + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | 
%w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "properties": + if err := dec.Decode(&s.Properties); err != nil { + return fmt.Errorf("%s | %w", "Properties", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewIpLocationProcessor returns a IpLocationProcessor. +func NewIpLocationProcessor() *IpLocationProcessor { + r := &IpLocationProcessor{} + + return r +} + +// true + +type IpLocationProcessorVariant interface { + IpLocationProcessorCaster() *IpLocationProcessor +} + +func (s *IpLocationProcessor) IpLocationProcessorCaster() *IpLocationProcessor { + return s +} diff --git a/typedapi/types/ipprefixaggregate.go b/typedapi/types/ipprefixaggregate.go index 4f3d89e2a1..e4b5748042 100644 --- a/typedapi/types/ipprefixaggregate.go +++ b/typedapi/types/ipprefixaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // IpPrefixAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L633-L634 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L709-L713 type IpPrefixAggregate struct { Buckets BucketsIpPrefixBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewIpPrefixAggregate() *IpPrefixAggregate { return r } + +// false diff --git a/typedapi/types/ipprefixaggregation.go b/typedapi/types/ipprefixaggregation.go index 7e8b8e6b29..e3a77ad069 100644 --- a/typedapi/types/ipprefixaggregation.go +++ b/typedapi/types/ipprefixaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IpPrefixAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L1122-L1151 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1195-L1224 type IpPrefixAggregation struct { // AppendPrefixLength Defines whether the prefix length is appended to IP address keys in the // response. 
@@ -156,3 +156,13 @@ func NewIpPrefixAggregation() *IpPrefixAggregation { return r } + +// true + +type IpPrefixAggregationVariant interface { + IpPrefixAggregationCaster() *IpPrefixAggregation +} + +func (s *IpPrefixAggregation) IpPrefixAggregationCaster() *IpPrefixAggregation { + return s +} diff --git a/typedapi/types/ipprefixbucket.go b/typedapi/types/ipprefixbucket.go index 46b3dea94b..0d3701bf48 100644 --- a/typedapi/types/ipprefixbucket.go +++ b/typedapi/types/ipprefixbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // IpPrefixBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L636-L641 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L715-L720 type IpPrefixBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -543,6 +543,13 @@ func (s *IpPrefixBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -676,8 +683,10 @@ func (s IpPrefixBucket) MarshalJSON() ([]byte, error) { // NewIpPrefixBucket returns a IpPrefixBucket. 
func NewIpPrefixBucket() *IpPrefixBucket { r := &IpPrefixBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/ipproperty.go b/typedapi/types/ipproperty.go index 8544148c14..17704f4b2a 100644 --- a/typedapi/types/ipproperty.go +++ b/typedapi/types/ipproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,11 +30,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // IpProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/specialized.ts#L65-L79 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/specialized.ts#L74-L88 type IpProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -45,13 +46,13 @@ type IpProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - NullValue *string `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *string `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -143,301 +144,313 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if 
err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -531,301 +544,313 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := 
NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -836,18 +861,6 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Script", err) } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -862,6 +875,11 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -903,8 +921,8 @@ func (s IpProperty) MarshalJSON() ([]byte, error) { OnScriptError: s.OnScriptError, Properties: s.Properties, Script: s.Script, - Similarity: s.Similarity, Store: s.Store, + SyntheticSourceKeep: 
s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, Type: s.Type, } @@ -917,10 +935,20 @@ func (s IpProperty) MarshalJSON() ([]byte, error) { // NewIpProperty returns a IpProperty. func NewIpProperty() *IpProperty { r := &IpProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type IpPropertyVariant interface { + IpPropertyCaster() *IpProperty +} + +func (s *IpProperty) IpPropertyCaster() *IpProperty { + return s +} diff --git a/typedapi/types/iprangeaggregate.go b/typedapi/types/iprangeaggregate.go index 885db62457..8fc1a2c6ca 100644 --- a/typedapi/types/iprangeaggregate.go +++ b/typedapi/types/iprangeaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // IpRangeAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L560-L562 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L624-L629 type IpRangeAggregate struct { Buckets BucketsIpRangeBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewIpRangeAggregate() *IpRangeAggregate { return r } + +// false diff --git a/typedapi/types/iprangeaggregation.go b/typedapi/types/iprangeaggregation.go index 00ae6914ce..3f80732b48 100644 --- a/typedapi/types/iprangeaggregation.go +++ b/typedapi/types/iprangeaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // IpRangeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L550-L559 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L567-L576 type IpRangeAggregation struct { // Field The date field whose values are used to build ranges. 
Field *string `json:"field,omitempty"` @@ -74,3 +74,13 @@ func NewIpRangeAggregation() *IpRangeAggregation { return r } + +// true + +type IpRangeAggregationVariant interface { + IpRangeAggregationCaster() *IpRangeAggregation +} + +func (s *IpRangeAggregation) IpRangeAggregationCaster() *IpRangeAggregation { + return s +} diff --git a/typedapi/types/iprangeaggregationrange.go b/typedapi/types/iprangeaggregationrange.go index a7e269b4cf..adc3ccfc13 100644 --- a/typedapi/types/iprangeaggregationrange.go +++ b/typedapi/types/iprangeaggregationrange.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // IpRangeAggregationRange type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L561-L574 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L578-L591 type IpRangeAggregationRange struct { // From Start of the range. From *string `json:"from,omitempty"` @@ -103,3 +103,13 @@ func NewIpRangeAggregationRange() *IpRangeAggregationRange { return r } + +// true + +type IpRangeAggregationRangeVariant interface { + IpRangeAggregationRangeCaster() *IpRangeAggregationRange +} + +func (s *IpRangeAggregationRange) IpRangeAggregationRangeCaster() *IpRangeAggregationRange { + return s +} diff --git a/typedapi/types/iprangebucket.go b/typedapi/types/iprangebucket.go index bb786ecb69..f16628ff4f 100644 --- a/typedapi/types/iprangebucket.go +++ b/typedapi/types/iprangebucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // IpRangeBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L564-L568 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L631-L635 type IpRangeBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -524,6 +524,13 @@ func (s *IpRangeBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -657,8 +664,10 @@ func (s IpRangeBucket) MarshalJSON() ([]byte, error) { // NewIpRangeBucket returns a IpRangeBucket. func NewIpRangeBucket() *IpRangeBucket { r := &IpRangeBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/iprangeproperty.go b/typedapi/types/iprangeproperty.go index fb2021e6aa..606987c961 100644 --- a/typedapi/types/iprangeproperty.go +++ b/typedapi/types/iprangeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // IpRangeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/range.ts#L46-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/range.ts#L46-L48 type IpRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -44,11 +45,11 @@ type IpRangeProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { @@ -150,301 +151,313 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -507,318 +520,318 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -833,6 +846,11 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -847,19 +865,19 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { func (s IpRangeProperty) MarshalJSON() ([]byte, error) { type innerIpRangeProperty IpRangeProperty tmp := innerIpRangeProperty{ - Boost: s.Boost, - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + 
SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "ip_range" @@ -870,10 +888,20 @@ func (s IpRangeProperty) MarshalJSON() ([]byte, error) { // NewIpRangeProperty returns a IpRangeProperty. func NewIpRangeProperty() *IpRangeProperty { r := &IpRangeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type IpRangePropertyVariant interface { + IpRangePropertyCaster() *IpRangeProperty +} + +func (s *IpRangeProperty) IpRangePropertyCaster() *IpRangeProperty { + return s +} diff --git a/typedapi/types/irishanalyzer.go b/typedapi/types/irishanalyzer.go new file mode 100644 index 0000000000..871bc0e71d --- /dev/null +++ b/typedapi/types/irishanalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IrishAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L200-L205 +type IrishAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *IrishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IrishAnalyzer) MarshalJSON() ([]byte, error) { 
+ type innerIrishAnalyzer IrishAnalyzer + tmp := innerIrishAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "irish" + + return json.Marshal(tmp) +} + +// NewIrishAnalyzer returns a IrishAnalyzer. +func NewIrishAnalyzer() *IrishAnalyzer { + r := &IrishAnalyzer{} + + return r +} + +// true + +type IrishAnalyzerVariant interface { + IrishAnalyzerCaster() *IrishAnalyzer +} + +func (s *IrishAnalyzer) IrishAnalyzerCaster() *IrishAnalyzer { + return s +} diff --git a/typedapi/types/italiananalyzer.go b/typedapi/types/italiananalyzer.go new file mode 100644 index 0000000000..e37f8c8f27 --- /dev/null +++ b/typedapi/types/italiananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ItalianAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L207-L212 +type ItalianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ItalianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ItalianAnalyzer) MarshalJSON() ([]byte, error) { + type innerItalianAnalyzer ItalianAnalyzer + tmp := innerItalianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "italian" + + 
return json.Marshal(tmp) +} + +// NewItalianAnalyzer returns a ItalianAnalyzer. +func NewItalianAnalyzer() *ItalianAnalyzer { + r := &ItalianAnalyzer{} + + return r +} + +// true + +type ItalianAnalyzerVariant interface { + ItalianAnalyzerCaster() *ItalianAnalyzer +} + +func (s *ItalianAnalyzer) ItalianAnalyzerCaster() *ItalianAnalyzer { + return s +} diff --git a/typedapi/types/job.go b/typedapi/types/job.go index 813784caef..019e139d62 100644 --- a/typedapi/types/job.go +++ b/typedapi/types/job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Job type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Job.ts#L61-L180 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Job.ts#L61-L180 type Job struct { // AllowLazyOpen Advanced configuration option. // Specifies whether this job can open when there is insufficient machine @@ -351,3 +351,5 @@ func NewJob() *Job { return r } + +// false diff --git a/typedapi/types/jobblocked.go b/typedapi/types/jobblocked.go index 1718ef5745..c61c9f30d6 100644 --- a/typedapi/types/jobblocked.go +++ b/typedapi/types/jobblocked.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // JobBlocked type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Job.ts#L392-L395 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Job.ts#L392-L395 type JobBlocked struct { Reason jobblockedreason.JobBlockedReason `json:"reason"` TaskId TaskId `json:"task_id,omitempty"` @@ -74,3 +74,5 @@ func NewJobBlocked() *JobBlocked { return r } + +// false diff --git a/typedapi/types/jobconfig.go b/typedapi/types/jobconfig.go index ac50faa589..fe766b3eda 100644 --- a/typedapi/types/jobconfig.go +++ b/typedapi/types/jobconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // JobConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Job.ts#L182-L283 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Job.ts#L182-L283 type JobConfig struct { // AllowLazyOpen Advanced configuration option. 
Specifies whether this job can open when there // is insufficient machine learning node capacity for it to be immediately @@ -293,3 +293,13 @@ func NewJobConfig() *JobConfig { return r } + +// true + +type JobConfigVariant interface { + JobConfigCaster() *JobConfig +} + +func (s *JobConfig) JobConfigCaster() *JobConfig { + return s +} diff --git a/typedapi/types/jobforecaststatistics.go b/typedapi/types/jobforecaststatistics.go index 440a536363..ed97f76136 100644 --- a/typedapi/types/jobforecaststatistics.go +++ b/typedapi/types/jobforecaststatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // JobForecastStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Job.ts#L343-L350 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Job.ts#L343-L350 type JobForecastStatistics struct { ForecastedJobs int `json:"forecasted_jobs"` MemoryBytes *JobStatistics `json:"memory_bytes,omitempty"` @@ -118,8 +118,10 @@ func (s *JobForecastStatistics) UnmarshalJSON(data []byte) error { // NewJobForecastStatistics returns a JobForecastStatistics. func NewJobForecastStatistics() *JobForecastStatistics { r := &JobForecastStatistics{ - Status: make(map[string]int64, 0), + Status: make(map[string]int64), } return r } + +// false diff --git a/typedapi/types/jobsrecord.go b/typedapi/types/jobsrecord.go index 0ed7051d59..c75c74c024 100644 --- a/typedapi/types/jobsrecord.go +++ b/typedapi/types/jobsrecord.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -35,7 +35,7 @@ import ( // JobsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/ml_jobs/types.ts#L24-L347 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/ml_jobs/types.ts#L24-L347 type JobsRecord struct { // AssignmentExplanation For open anomaly detection jobs only, contains messages relating to the // selection of a node to run the job. @@ -909,3 +909,5 @@ func NewJobsRecord() *JobsRecord { return r } + +// false diff --git a/typedapi/types/jobstatistics.go b/typedapi/types/jobstatistics.go index 2c16a09ec4..bbb4c1f3ac 100644 --- a/typedapi/types/jobstatistics.go +++ b/typedapi/types/jobstatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // JobStatistics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Job.ts#L54-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Job.ts#L54-L59 type JobStatistics struct { Avg Float64 `json:"avg"` Max Float64 `json:"max"` @@ -129,3 +129,5 @@ func NewJobStatistics() *JobStatistics { return r } + +// false diff --git a/typedapi/types/jobstats.go b/typedapi/types/jobstats.go index 0b4b1d8796..5e5e37e257 100644 --- a/typedapi/types/jobstats.go +++ b/typedapi/types/jobstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // JobStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Job.ts#L284-L330 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Job.ts#L284-L330 type JobStats struct { // AssignmentExplanation For open anomaly detection jobs only, contains messages relating to the // selection of a node to run the job. @@ -57,7 +57,7 @@ type JobStats struct { ModelSizeStats ModelSizeStats `json:"model_size_stats"` // Node Contains properties for the node that runs the job. // This information is available only for open jobs. - Node *DiscoveryNode `json:"node,omitempty"` + Node *DiscoveryNodeCompact `json:"node,omitempty"` // OpenTime For open jobs only, the elapsed time for which the job has been open. 
OpenTime DateTime `json:"open_time,omitempty"` // State The status of the anomaly detection job, which can be one of the following @@ -167,3 +167,5 @@ func NewJobStats() *JobStats { return r } + +// false diff --git a/typedapi/types/jobtimingstats.go b/typedapi/types/jobtimingstats.go index dce5c311df..763b92c235 100644 --- a/typedapi/types/jobtimingstats.go +++ b/typedapi/types/jobtimingstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // JobTimingStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Job.ts#L332-L341 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Job.ts#L332-L341 type JobTimingStats struct { AverageBucketProcessingTimeMs Float64 `json:"average_bucket_processing_time_ms,omitempty"` BucketCount int64 `json:"bucket_count"` @@ -119,3 +119,5 @@ func NewJobTimingStats() *JobTimingStats { return r } + +// false diff --git a/typedapi/types/jobusage.go b/typedapi/types/jobusage.go index deb4b78362..dbfb4abbf4 100644 --- a/typedapi/types/jobusage.go +++ b/typedapi/types/jobusage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // JobUsage type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L364-L370 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L374-L380 type JobUsage struct { Count int `json:"count"` CreatedBy map[string]int64 `json:"created_by"` @@ -102,8 +102,10 @@ func (s *JobUsage) UnmarshalJSON(data []byte) error { // NewJobUsage returns a JobUsage. func NewJobUsage() *JobUsage { r := &JobUsage{ - CreatedBy: make(map[string]int64, 0), + CreatedBy: make(map[string]int64), } return r } + +// false diff --git a/typedapi/types/joinprocessor.go b/typedapi/types/joinprocessor.go index 514a62868f..461e6dd3fa 100644 --- a/typedapi/types/joinprocessor.go +++ b/typedapi/types/joinprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // JoinProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L801-L816 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1103-L1118 type JoinProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. 
@@ -157,3 +157,13 @@ func NewJoinProcessor() *JoinProcessor { return r } + +// true + +type JoinProcessorVariant interface { + JoinProcessorCaster() *JoinProcessor +} + +func (s *JoinProcessor) JoinProcessorCaster() *JoinProcessor { + return s +} diff --git a/typedapi/types/joinproperty.go b/typedapi/types/joinproperty.go index b95fd94293..2a4713e82e 100644 --- a/typedapi/types/joinproperty.go +++ b/typedapi/types/joinproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,21 +29,23 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // JoinProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L89-L93 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L92-L96 type JoinProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` EagerGlobalOrdinals *bool `json:"eager_global_ordinals,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Relations map[string][]string `json:"relations,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Relations map[string][]string `json:"relations,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *JoinProperty) UnmarshalJSON(data []byte) error { @@ -99,301 +101,313 @@ func (s *JoinProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo 
case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -442,301 +456,313 @@ func (s *JoinProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -767,6 +793,11 @@ func (s *JoinProperty) UnmarshalJSON(data []byte) error { } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -788,6 +819,7 @@ func (s JoinProperty) MarshalJSON() ([]byte, error) { Meta: s.Meta, Properties: s.Properties, Relations: s.Relations, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, } @@ -799,11 +831,21 @@ func (s JoinProperty) MarshalJSON() ([]byte, error) { // NewJoinProperty returns a JoinProperty. 
func NewJoinProperty() *JoinProperty { r := &JoinProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), - Relations: make(map[string][]string, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + Relations: make(map[string][]string), } return r } + +// true + +type JoinPropertyVariant interface { + JoinPropertyCaster() *JoinProperty +} + +func (s *JoinProperty) JoinPropertyCaster() *JoinProperty { + return s +} diff --git a/typedapi/types/jsonprocessor.go b/typedapi/types/jsonprocessor.go index a8f11b2ba0..4b4295efc3 100644 --- a/typedapi/types/jsonprocessor.go +++ b/typedapi/types/jsonprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // JsonProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L818-L847 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1120-L1149 type JsonProcessor struct { // AddToRoot Flag that forces the parsed JSON to be added at the top level of the // document. 
@@ -191,3 +191,13 @@ func NewJsonProcessor() *JsonProcessor { return r } + +// true + +type JsonProcessorVariant interface { + JsonProcessorCaster() *JsonProcessor +} + +func (s *JsonProcessor) JsonProcessorCaster() *JsonProcessor { + return s +} diff --git a/typedapi/types/jvm.go b/typedapi/types/jvm.go index 7274014443..74dbb8ed7f 100644 --- a/typedapi/types/jvm.go +++ b/typedapi/types/jvm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Jvm type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L811-L845 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L882-L916 type Jvm struct { // BufferPools Contains statistics about JVM buffer pools for the node. BufferPools map[string]NodeBufferPool `json:"buffer_pools,omitempty"` @@ -145,8 +145,10 @@ func (s *Jvm) UnmarshalJSON(data []byte) error { // NewJvm returns a Jvm. func NewJvm() *Jvm { r := &Jvm{ - BufferPools: make(map[string]NodeBufferPool, 0), + BufferPools: make(map[string]NodeBufferPool), } return r } + +// false diff --git a/typedapi/types/jvmclasses.go b/typedapi/types/jvmclasses.go index 7eb689949b..9df0c9b0b8 100644 --- a/typedapi/types/jvmclasses.go +++ b/typedapi/types/jvmclasses.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // JvmClasses type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L908-L921 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L979-L992 type JvmClasses struct { // CurrentLoadedCount Number of classes currently loaded by JVM. CurrentLoadedCount *int64 `json:"current_loaded_count,omitempty"` @@ -112,3 +112,5 @@ func NewJvmClasses() *JvmClasses { return r } + +// false diff --git a/typedapi/types/jvmmemorystats.go b/typedapi/types/jvmmemorystats.go index 3b1706da93..e58a6ba4a3 100644 --- a/typedapi/types/jvmmemorystats.go +++ b/typedapi/types/jvmmemorystats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // JvmMemoryStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L847-L876 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L918-L947 type JvmMemoryStats struct { // HeapCommittedInBytes Amount of memory, in bytes, available for use by the heap. 
HeapCommittedInBytes *int64 `json:"heap_committed_in_bytes,omitempty"` @@ -170,8 +170,10 @@ func (s *JvmMemoryStats) UnmarshalJSON(data []byte) error { // NewJvmMemoryStats returns a JvmMemoryStats. func NewJvmMemoryStats() *JvmMemoryStats { r := &JvmMemoryStats{ - Pools: make(map[string]Pool, 0), + Pools: make(map[string]Pool), } return r } + +// false diff --git a/typedapi/types/jvmstats.go b/typedapi/types/jvmstats.go index 78f52ac0ce..7cec484399 100644 --- a/typedapi/types/jvmstats.go +++ b/typedapi/types/jvmstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // JvmStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_memory_stats/types.ts#L50-L63 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_memory_stats/types.ts#L50-L63 type JvmStats struct { // HeapMax Maximum amount of memory available for use by the heap. HeapMax ByteSize `json:"heap_max,omitempty"` @@ -138,3 +138,5 @@ func NewJvmStats() *JvmStats { return r } + +// false diff --git a/typedapi/types/jvmthreads.go b/typedapi/types/jvmthreads.go index b90f1a2f7b..9328e99d78 100644 --- a/typedapi/types/jvmthreads.go +++ b/typedapi/types/jvmthreads.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // JvmThreads type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L897-L906 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L968-L977 type JvmThreads struct { // Count Number of active threads in use by JVM. Count *int64 `json:"count,omitempty"` @@ -95,3 +95,5 @@ func NewJvmThreads() *JvmThreads { return r } + +// false diff --git a/typedapi/types/keeptypestokenfilter.go b/typedapi/types/keeptypestokenfilter.go index 7ce38dfc13..d1c4903c20 100644 --- a/typedapi/types/keeptypestokenfilter.go +++ b/typedapi/types/keeptypestokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // KeepTypesTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L220-L224 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L219-L223 type KeepTypesTokenFilter struct { Mode *keeptypesmode.KeepTypesMode `json:"mode,omitempty"` Type string `json:"type,omitempty"` @@ -101,3 +101,13 @@ func NewKeepTypesTokenFilter() *KeepTypesTokenFilter { return r } + +// true + +type KeepTypesTokenFilterVariant interface { + KeepTypesTokenFilterCaster() *KeepTypesTokenFilter +} + +func (s *KeepTypesTokenFilter) KeepTypesTokenFilterCaster() *KeepTypesTokenFilter { + return s +} diff --git a/typedapi/types/keepwordstokenfilter.go b/typedapi/types/keepwordstokenfilter.go index 9054ac6ea9..d2e73ec40a 100644 --- a/typedapi/types/keepwordstokenfilter.go +++ b/typedapi/types/keepwordstokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // KeepWordsTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L226-L231 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L225-L230 type KeepWordsTokenFilter struct { KeepWords []string `json:"keep_words,omitempty"` KeepWordsCase *bool `json:"keep_words_case,omitempty"` @@ -123,3 +123,13 @@ func NewKeepWordsTokenFilter() *KeepWordsTokenFilter { return r } + +// true + +type KeepWordsTokenFilterVariant interface { + KeepWordsTokenFilterCaster() *KeepWordsTokenFilter +} + +func (s *KeepWordsTokenFilter) KeepWordsTokenFilterCaster() *KeepWordsTokenFilter { + return s +} diff --git a/typedapi/types/keyedpercentiles.go b/typedapi/types/keyedpercentiles.go index d51ce8b0c3..870f4a8def 100644 --- a/typedapi/types/keyedpercentiles.go +++ b/typedapi/types/keyedpercentiles.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // KeyedPercentiles type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L158-L158 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L160-L160 type KeyedPercentiles map[string]string func (s KeyedPercentiles) UnmarshalJSON(data []byte) error { diff --git a/typedapi/types/keyedprocessor.go b/typedapi/types/keyedprocessor.go index 1a64ac9fea..9584fd995d 100644 --- a/typedapi/types/keyedprocessor.go +++ b/typedapi/types/keyedprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // KeyedProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L379-L382 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L415-L418 type KeyedProcessor struct { Stats *Processor `json:"stats,omitempty"` Type *string `json:"type,omitempty"` @@ -80,3 +80,5 @@ func NewKeyedProcessor() *KeyedProcessor { return r } + +// false diff --git a/typedapi/types/keyvalueprocessor.go b/typedapi/types/keyvalueprocessor.go index 6c42e75242..8bb6fcc340 100644 --- a/typedapi/types/keyvalueprocessor.go +++ b/typedapi/types/keyvalueprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // KeyValueProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L856-L908 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1158-L1210 type KeyValueProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -265,3 +265,13 @@ func NewKeyValueProcessor() *KeyValueProcessor { return r } + +// true + +type KeyValueProcessorVariant interface { + KeyValueProcessorCaster() *KeyValueProcessor +} + +func (s *KeyValueProcessor) KeyValueProcessorCaster() *KeyValueProcessor { + return s +} diff --git a/typedapi/types/keywordanalyzer.go b/typedapi/types/keywordanalyzer.go index 6b8c921659..a6a9f6cb50 100644 --- a/typedapi/types/keywordanalyzer.go +++ b/typedapi/types/keywordanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // KeywordAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/analyzers.ts#L47-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L47-L50 type KeywordAnalyzer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewKeywordAnalyzer() *KeywordAnalyzer { return r } + +// true + +type KeywordAnalyzerVariant interface { + KeywordAnalyzerCaster() *KeywordAnalyzer +} + +func (s *KeywordAnalyzer) KeywordAnalyzerCaster() *KeywordAnalyzer { + return s +} diff --git a/typedapi/types/keywordmarkertokenfilter.go b/typedapi/types/keywordmarkertokenfilter.go index 98d40d6e96..425325e196 100644 --- a/typedapi/types/keywordmarkertokenfilter.go +++ b/typedapi/types/keywordmarkertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // KeywordMarkerTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L233-L239 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L232-L238 type KeywordMarkerTokenFilter struct { IgnoreCase *bool `json:"ignore_case,omitempty"` Keywords []string `json:"keywords,omitempty"` @@ -71,8 +71,19 @@ func (s *KeywordMarkerTokenFilter) UnmarshalJSON(data []byte) error { } case "keywords": - if err := dec.Decode(&s.Keywords); err != nil { - return fmt.Errorf("%s | %w", "Keywords", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Keywords", err) + } + + s.Keywords = append(s.Keywords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Keywords); err != nil { + return fmt.Errorf("%s | %w", "Keywords", err) + } } case "keywords_path": @@ -137,3 +148,13 @@ func NewKeywordMarkerTokenFilter() *KeywordMarkerTokenFilter { return r } + +// true + +type KeywordMarkerTokenFilterVariant interface { + KeywordMarkerTokenFilterCaster() *KeywordMarkerTokenFilter +} + +func (s *KeywordMarkerTokenFilter) KeywordMarkerTokenFilterCaster() *KeywordMarkerTokenFilter { + return s +} diff --git a/typedapi/types/keywordproperty.go b/typedapi/types/keywordproperty.go index 646b56d173..d705d36c51 100644 --- a/typedapi/types/keywordproperty.go +++ b/typedapi/types/keywordproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,11 +31,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // KeywordProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L95-L113 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L98-L117 type KeywordProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -47,16 +48,17 @@ type KeywordProperty struct { Index *bool `json:"index,omitempty"` IndexOptions *indexoptions.IndexOptions `json:"index_options,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Normalizer *string `json:"normalizer,omitempty"` - Norms *bool `json:"norms,omitempty"` - NullValue *string `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Similarity *string `json:"similarity,omitempty"` - SplitQueriesOnWhitespace *bool `json:"split_queries_on_whitespace,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Normalizer *string `json:"normalizer,omitempty"` + Norms *bool `json:"norms,omitempty"` + NullValue *string `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Similarity *string `json:"similarity,omitempty"` + SplitQueriesOnWhitespace *bool `json:"split_queries_on_whitespace,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -162,301 +164,313 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -567,301 +581,313 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -912,6 +938,11 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -959,6 +990,7 @@ func (s KeywordProperty) MarshalJSON() ([]byte, error) { Similarity: s.Similarity, SplitQueriesOnWhitespace: s.SplitQueriesOnWhitespace, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, Type: s.Type, } @@ -971,10 +1003,20 @@ func (s KeywordProperty) MarshalJSON() ([]byte, error) { // NewKeywordProperty returns a KeywordProperty. func NewKeywordProperty() *KeywordProperty { r := &KeywordProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type KeywordPropertyVariant interface { + KeywordPropertyCaster() *KeywordProperty +} + +func (s *KeywordProperty) KeywordPropertyCaster() *KeywordProperty { + return s +} diff --git a/typedapi/types/keywordtokenizer.go b/typedapi/types/keywordtokenizer.go index e0bcdb1918..e2d82056ea 100644 --- a/typedapi/types/keywordtokenizer.go +++ b/typedapi/types/keywordtokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,9 +31,9 @@ import ( // KeywordTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L62-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L68-L74 type KeywordTokenizer struct { - BufferSize int `json:"buffer_size"` + BufferSize *int `json:"buffer_size,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } @@ -63,10 +63,10 @@ func (s *KeywordTokenizer) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "BufferSize", err) } - s.BufferSize = value + s.BufferSize = &value case float64: f := int(v) - s.BufferSize = f + s.BufferSize = &f } case "type": @@ -104,3 +104,13 @@ func NewKeywordTokenizer() *KeywordTokenizer { return r } + +// true + +type KeywordTokenizerVariant interface { + KeywordTokenizerCaster() *KeywordTokenizer +} + +func (s *KeywordTokenizer) KeywordTokenizerCaster() *KeywordTokenizer { + return s +} diff --git a/typedapi/types/kibanatoken.go b/typedapi/types/kibanatoken.go index 63092bd4f2..17e6689ee3 100644 --- a/typedapi/types/kibanatoken.go +++ b/typedapi/types/kibanatoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,9 +31,12 @@ import ( // KibanaToken type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/enroll_kibana/Response.ts#L27-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/enroll_kibana/Response.ts#L31-L41 type KibanaToken struct { - Name string `json:"name"` + // Name The name of the bearer token for the `elastic/kibana` service account. + Name string `json:"name"` + // Value The value of the bearer token for the `elastic/kibana` service account. + // Use this value to authenticate the service account with Elasticsearch. Value string `json:"value"` } @@ -87,3 +90,5 @@ func NewKibanaToken() *KibanaToken { return r } + +// false diff --git a/typedapi/types/knncollectorresult.go b/typedapi/types/knncollectorresult.go new file mode 100644 index 0000000000..953d7c2230 --- /dev/null +++ b/typedapi/types/knncollectorresult.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KnnCollectorResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L222-L228 +type KnnCollectorResult struct { + Children []KnnCollectorResult `json:"children,omitempty"` + Name string `json:"name"` + Reason string `json:"reason"` + Time Duration `json:"time,omitempty"` + TimeInNanos int64 `json:"time_in_nanos"` +} + +func (s *KnnCollectorResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return fmt.Errorf("%s | %w", "Children", err) + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return fmt.Errorf("%s | %w", "Time", err) + } + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return fmt.Errorf("%s | %w", "TimeInNanos", err) + } + + } + } + return nil +} + +// NewKnnCollectorResult returns a KnnCollectorResult. 
+func NewKnnCollectorResult() *KnnCollectorResult { + r := &KnnCollectorResult{} + + return r +} + +// false diff --git a/typedapi/types/knnquery.go b/typedapi/types/knnquery.go index 58834881a1..e5bfda7c78 100644 --- a/typedapi/types/knnquery.go +++ b/typedapi/types/knnquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // KnnQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Knn.ts#L54-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Knn.ts#L64-L87 type KnnQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -43,6 +43,8 @@ type KnnQuery struct { Field string `json:"field"` // Filter Filters for the kNN search query Filter []Query `json:"filter,omitempty"` + // K The final number of nearest neighbors to return as top hits + K *int `json:"k,omitempty"` // NumCandidates The number of nearest neighbor candidates to consider per shard NumCandidates *int `json:"num_candidates,omitempty"` QueryName_ *string `json:"_name,omitempty"` @@ -51,6 +53,8 @@ type KnnQuery struct { // QueryVectorBuilder The query vector builder. You must provide a query_vector_builder or // query_vector, but not both. 
QueryVectorBuilder *QueryVectorBuilder `json:"query_vector_builder,omitempty"` + // RescoreVector Apply oversampling and rescoring to quantized vectors * + RescoreVector *RescoreVector `json:"rescore_vector,omitempty"` // Similarity The minimum similarity for a vector to be considered a match Similarity *float32 `json:"similarity,omitempty"` } @@ -107,6 +111,22 @@ func (s *KnnQuery) UnmarshalJSON(data []byte) error { } } + case "k": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "K", err) + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + case "num_candidates": var tmp any @@ -145,6 +165,11 @@ func (s *KnnQuery) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "QueryVectorBuilder", err) } + case "rescore_vector": + if err := dec.Decode(&s.RescoreVector); err != nil { + return fmt.Errorf("%s | %w", "RescoreVector", err) + } + case "similarity": var tmp any dec.Decode(&tmp) @@ -172,3 +197,13 @@ func NewKnnQuery() *KnnQuery { return r } + +// true + +type KnnQueryVariant interface { + KnnQueryCaster() *KnnQuery +} + +func (s *KnnQuery) KnnQueryCaster() *KnnQuery { + return s +} diff --git a/typedapi/types/knnqueryprofilebreakdown.go b/typedapi/types/knnqueryprofilebreakdown.go new file mode 100644 index 0000000000..0f84b0e0df --- /dev/null +++ b/typedapi/types/knnqueryprofilebreakdown.go @@ -0,0 +1,385 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KnnQueryProfileBreakdown type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L199-L220 +type KnnQueryProfileBreakdown struct { + Advance int64 `json:"advance"` + AdvanceCount int64 `json:"advance_count"` + BuildScorer int64 `json:"build_scorer"` + BuildScorerCount int64 `json:"build_scorer_count"` + ComputeMaxScore int64 `json:"compute_max_score"` + ComputeMaxScoreCount int64 `json:"compute_max_score_count"` + CountWeight int64 `json:"count_weight"` + CountWeightCount int64 `json:"count_weight_count"` + CreateWeight int64 `json:"create_weight"` + CreateWeightCount int64 `json:"create_weight_count"` + Match int64 `json:"match"` + MatchCount int64 `json:"match_count"` + NextDoc int64 `json:"next_doc"` + NextDocCount int64 `json:"next_doc_count"` + Score int64 `json:"score"` + ScoreCount int64 `json:"score_count"` + SetMinCompetitiveScore int64 `json:"set_min_competitive_score"` + SetMinCompetitiveScoreCount int64 `json:"set_min_competitive_score_count"` + ShallowAdvance int64 `json:"shallow_advance"` + ShallowAdvanceCount int64 `json:"shallow_advance_count"` +} + +func (s *KnnQueryProfileBreakdown) UnmarshalJSON(data []byte) error { + + dec := 
json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "advance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Advance", err) + } + s.Advance = value + case float64: + f := int64(v) + s.Advance = f + } + + case "advance_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AdvanceCount", err) + } + s.AdvanceCount = value + case float64: + f := int64(v) + s.AdvanceCount = f + } + + case "build_scorer": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BuildScorer", err) + } + s.BuildScorer = value + case float64: + f := int64(v) + s.BuildScorer = f + } + + case "build_scorer_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BuildScorerCount", err) + } + s.BuildScorerCount = value + case float64: + f := int64(v) + s.BuildScorerCount = f + } + + case "compute_max_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ComputeMaxScore", err) + } + s.ComputeMaxScore = value + case float64: + f := int64(v) + s.ComputeMaxScore = f + } + + case "compute_max_score_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ComputeMaxScoreCount", err) + } + s.ComputeMaxScoreCount = value + case float64: + f := int64(v) + 
s.ComputeMaxScoreCount = f + } + + case "count_weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CountWeight", err) + } + s.CountWeight = value + case float64: + f := int64(v) + s.CountWeight = f + } + + case "count_weight_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CountWeightCount", err) + } + s.CountWeightCount = value + case float64: + f := int64(v) + s.CountWeightCount = f + } + + case "create_weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CreateWeight", err) + } + s.CreateWeight = value + case float64: + f := int64(v) + s.CreateWeight = f + } + + case "create_weight_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CreateWeightCount", err) + } + s.CreateWeightCount = value + case float64: + f := int64(v) + s.CreateWeightCount = f + } + + case "match": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Match", err) + } + s.Match = value + case float64: + f := int64(v) + s.Match = f + } + + case "match_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MatchCount", err) + } + s.MatchCount = value + case float64: + f := int64(v) + s.MatchCount = f + } + + case "next_doc": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + 
return fmt.Errorf("%s | %w", "NextDoc", err) + } + s.NextDoc = value + case float64: + f := int64(v) + s.NextDoc = f + } + + case "next_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NextDocCount", err) + } + s.NextDocCount = value + case float64: + f := int64(v) + s.NextDocCount = f + } + + case "score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Score", err) + } + s.Score = value + case float64: + f := int64(v) + s.Score = f + } + + case "score_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ScoreCount", err) + } + s.ScoreCount = value + case float64: + f := int64(v) + s.ScoreCount = f + } + + case "set_min_competitive_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SetMinCompetitiveScore", err) + } + s.SetMinCompetitiveScore = value + case float64: + f := int64(v) + s.SetMinCompetitiveScore = f + } + + case "set_min_competitive_score_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SetMinCompetitiveScoreCount", err) + } + s.SetMinCompetitiveScoreCount = value + case float64: + f := int64(v) + s.SetMinCompetitiveScoreCount = f + } + + case "shallow_advance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ShallowAdvance", err) + } + s.ShallowAdvance = value + case float64: + f := int64(v) + s.ShallowAdvance = f 
+ } + + case "shallow_advance_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ShallowAdvanceCount", err) + } + s.ShallowAdvanceCount = value + case float64: + f := int64(v) + s.ShallowAdvanceCount = f + } + + } + } + return nil +} + +// NewKnnQueryProfileBreakdown returns a KnnQueryProfileBreakdown. +func NewKnnQueryProfileBreakdown() *KnnQueryProfileBreakdown { + r := &KnnQueryProfileBreakdown{} + + return r +} + +// false diff --git a/typedapi/types/knnqueryprofileresult.go b/typedapi/types/knnqueryprofileresult.go new file mode 100644 index 0000000000..16e83b99a3 --- /dev/null +++ b/typedapi/types/knnqueryprofileresult.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KnnQueryProfileResult type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L188-L196 +type KnnQueryProfileResult struct { + Breakdown KnnQueryProfileBreakdown `json:"breakdown"` + Children []KnnQueryProfileResult `json:"children,omitempty"` + Debug map[string]json.RawMessage `json:"debug,omitempty"` + Description string `json:"description"` + Time Duration `json:"time,omitempty"` + TimeInNanos int64 `json:"time_in_nanos"` + Type string `json:"type"` +} + +func (s *KnnQueryProfileResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "breakdown": + if err := dec.Decode(&s.Breakdown); err != nil { + return fmt.Errorf("%s | %w", "Breakdown", err) + } + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return fmt.Errorf("%s | %w", "Children", err) + } + + case "debug": + if s.Debug == nil { + s.Debug = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Debug); err != nil { + return fmt.Errorf("%s | %w", "Debug", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return fmt.Errorf("%s | %w", "Time", err) + } + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return fmt.Errorf("%s | %w", "TimeInNanos", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + 
+// NewKnnQueryProfileResult returns a KnnQueryProfileResult. +func NewKnnQueryProfileResult() *KnnQueryProfileResult { + r := &KnnQueryProfileResult{ + Debug: make(map[string]json.RawMessage), + } + + return r +} + +// false diff --git a/typedapi/types/knnretriever.go b/typedapi/types/knnretriever.go index 3aee19cee0..b271393372 100644 --- a/typedapi/types/knnretriever.go +++ b/typedapi/types/knnretriever.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // KnnRetriever type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Retriever.ts#L58-L71 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Retriever.ts#L64-L82 type KnnRetriever struct { // Field The name of the vector field to search against. Field string `json:"field"` @@ -39,6 +39,9 @@ type KnnRetriever struct { Filter []Query `json:"filter,omitempty"` // K Number of nearest neighbors to return as top hits. K int `json:"k"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are not + // included in the top documents. + MinScore *float32 `json:"min_score,omitempty"` // NumCandidates Number of nearest neighbor candidates to consider per shard. NumCandidates int `json:"num_candidates"` // QueryVector Query vector. Must have the same number of dimensions as the vector field you @@ -47,6 +50,8 @@ type KnnRetriever struct { QueryVector []float32 `json:"query_vector,omitempty"` // QueryVectorBuilder Defines a model to build a query vector. 
QueryVectorBuilder *QueryVectorBuilder `json:"query_vector_builder,omitempty"` + // RescoreVector Apply oversampling and rescoring to quantized vectors * + RescoreVector *RescoreVector `json:"rescore_vector,omitempty"` // Similarity The minimum similarity required for a document to be considered a match. Similarity *float32 `json:"similarity,omitempty"` } @@ -110,6 +115,22 @@ func (s *KnnRetriever) UnmarshalJSON(data []byte) error { s.K = f } + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + case "num_candidates": var tmp any @@ -136,6 +157,11 @@ func (s *KnnRetriever) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "QueryVectorBuilder", err) } + case "rescore_vector": + if err := dec.Decode(&s.RescoreVector); err != nil { + return fmt.Errorf("%s | %w", "RescoreVector", err) + } + case "similarity": var tmp any dec.Decode(&tmp) @@ -163,3 +189,13 @@ func NewKnnRetriever() *KnnRetriever { return r } + +// true + +type KnnRetrieverVariant interface { + KnnRetrieverCaster() *KnnRetriever +} + +func (s *KnnRetriever) KnnRetrieverCaster() *KnnRetriever { + return s +} diff --git a/typedapi/types/knnsearch.go b/typedapi/types/knnsearch.go index ca95abbd80..5c2c27f9fd 100644 --- a/typedapi/types/knnsearch.go +++ b/typedapi/types/knnsearch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // KnnSearch type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Knn.ts#L30-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Knn.ts#L35-L62 type KnnSearch struct { // Boost Boost value to apply to kNN scores Boost *float32 `json:"boost,omitempty"` @@ -50,6 +50,8 @@ type KnnSearch struct { // QueryVectorBuilder The query vector builder. You must provide a query_vector_builder or // query_vector, but not both. QueryVectorBuilder *QueryVectorBuilder `json:"query_vector_builder,omitempty"` + // RescoreVector Apply oversampling and rescoring to quantized vectors * + RescoreVector *RescoreVector `json:"rescore_vector,omitempty"` // Similarity The minimum similarity for a vector to be considered a match Similarity *float32 `json:"similarity,omitempty"` } @@ -153,6 +155,11 @@ func (s *KnnSearch) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "QueryVectorBuilder", err) } + case "rescore_vector": + if err := dec.Decode(&s.RescoreVector); err != nil { + return fmt.Errorf("%s | %w", "RescoreVector", err) + } + case "similarity": var tmp any dec.Decode(&tmp) @@ -180,3 +187,13 @@ func NewKnnSearch() *KnnSearch { return r } + +// true + +type KnnSearchVariant interface { + KnnSearchCaster() *KnnSearch +} + +func (s *KnnSearch) KnnSearchCaster() *KnnSearch { + return s +} diff --git a/typedapi/types/kstemtokenfilter.go b/typedapi/types/kstemtokenfilter.go index 7a9f1b12b7..907dd40a7a 100644 --- a/typedapi/types/kstemtokenfilter.go +++ b/typedapi/types/kstemtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // KStemTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L241-L243 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L240-L242 type KStemTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewKStemTokenFilter() *KStemTokenFilter { return r } + +// true + +type KStemTokenFilterVariant interface { + KStemTokenFilterCaster() *KStemTokenFilter +} + +func (s *KStemTokenFilter) KStemTokenFilterCaster() *KStemTokenFilter { + return s +} diff --git a/typedapi/types/kuromojianalyzer.go b/typedapi/types/kuromojianalyzer.go index 1160c1b903..0355f4ff7a 100644 --- a/typedapi/types/kuromojianalyzer.go +++ b/typedapi/types/kuromojianalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // KuromojiAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/kuromoji-plugin.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/kuromoji-plugin.ts#L25-L29 type KuromojiAnalyzer struct { Mode kuromojitokenizationmode.KuromojiTokenizationMode `json:"mode"` Type string `json:"type,omitempty"` @@ -102,3 +102,13 @@ func NewKuromojiAnalyzer() *KuromojiAnalyzer { return r } + +// true + +type KuromojiAnalyzerVariant interface { + KuromojiAnalyzerCaster() *KuromojiAnalyzer +} + +func (s *KuromojiAnalyzer) KuromojiAnalyzerCaster() *KuromojiAnalyzer { + return s +} diff --git a/typedapi/types/kuromojiiterationmarkcharfilter.go b/typedapi/types/kuromojiiterationmarkcharfilter.go index 87a3dd4c28..1890002a65 100644 --- a/typedapi/types/kuromojiiterationmarkcharfilter.go +++ b/typedapi/types/kuromojiiterationmarkcharfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // KuromojiIterationMarkCharFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/kuromoji-plugin.ts#L31-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/kuromoji-plugin.ts#L31-L35 type KuromojiIterationMarkCharFilter struct { NormalizeKana bool `json:"normalize_kana"` NormalizeKanji bool `json:"normalize_kanji"` @@ -118,3 +118,13 @@ func NewKuromojiIterationMarkCharFilter() *KuromojiIterationMarkCharFilter { return r } + +// true + +type KuromojiIterationMarkCharFilterVariant interface { + KuromojiIterationMarkCharFilterCaster() *KuromojiIterationMarkCharFilter +} + +func (s *KuromojiIterationMarkCharFilter) KuromojiIterationMarkCharFilterCaster() *KuromojiIterationMarkCharFilter { + return s +} diff --git a/typedapi/types/kuromojipartofspeechtokenfilter.go b/typedapi/types/kuromojipartofspeechtokenfilter.go index c644722871..2ec781590b 100644 --- a/typedapi/types/kuromojipartofspeechtokenfilter.go +++ b/typedapi/types/kuromojipartofspeechtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // KuromojiPartOfSpeechTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/kuromoji-plugin.ts#L37-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/kuromoji-plugin.ts#L37-L40 type KuromojiPartOfSpeechTokenFilter struct { Stoptags []string `json:"stoptags"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewKuromojiPartOfSpeechTokenFilter() *KuromojiPartOfSpeechTokenFilter { return r } + +// true + +type KuromojiPartOfSpeechTokenFilterVariant interface { + KuromojiPartOfSpeechTokenFilterCaster() *KuromojiPartOfSpeechTokenFilter +} + +func (s *KuromojiPartOfSpeechTokenFilter) KuromojiPartOfSpeechTokenFilterCaster() *KuromojiPartOfSpeechTokenFilter { + return s +} diff --git a/typedapi/types/kuromojireadingformtokenfilter.go b/typedapi/types/kuromojireadingformtokenfilter.go index 2000032327..6c754f09c8 100644 --- a/typedapi/types/kuromojireadingformtokenfilter.go +++ b/typedapi/types/kuromojireadingformtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // KuromojiReadingFormTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/kuromoji-plugin.ts#L42-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/kuromoji-plugin.ts#L42-L45 type KuromojiReadingFormTokenFilter struct { Type string `json:"type,omitempty"` UseRomaji bool `json:"use_romaji"` @@ -102,3 +102,13 @@ func NewKuromojiReadingFormTokenFilter() *KuromojiReadingFormTokenFilter { return r } + +// true + +type KuromojiReadingFormTokenFilterVariant interface { + KuromojiReadingFormTokenFilterCaster() *KuromojiReadingFormTokenFilter +} + +func (s *KuromojiReadingFormTokenFilter) KuromojiReadingFormTokenFilterCaster() *KuromojiReadingFormTokenFilter { + return s +} diff --git a/typedapi/types/kuromojistemmertokenfilter.go b/typedapi/types/kuromojistemmertokenfilter.go index 5ad983fd80..c3acc37a84 100644 --- a/typedapi/types/kuromojistemmertokenfilter.go +++ b/typedapi/types/kuromojistemmertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // KuromojiStemmerTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/kuromoji-plugin.ts#L47-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/kuromoji-plugin.ts#L47-L50 type KuromojiStemmerTokenFilter struct { MinimumLength int `json:"minimum_length"` Type string `json:"type,omitempty"` @@ -104,3 +104,13 @@ func NewKuromojiStemmerTokenFilter() *KuromojiStemmerTokenFilter { return r } + +// true + +type KuromojiStemmerTokenFilterVariant interface { + KuromojiStemmerTokenFilterCaster() *KuromojiStemmerTokenFilter +} + +func (s *KuromojiStemmerTokenFilter) KuromojiStemmerTokenFilterCaster() *KuromojiStemmerTokenFilter { + return s +} diff --git a/typedapi/types/kuromojitokenizer.go b/typedapi/types/kuromojitokenizer.go index 9a3f149b84..b16ceee66b 100644 --- a/typedapi/types/kuromojitokenizer.go +++ b/typedapi/types/kuromojitokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // KuromojiTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/kuromoji-plugin.ts#L58-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/kuromoji-plugin.ts#L58-L67 type KuromojiTokenizer struct { DiscardCompoundToken *bool `json:"discard_compound_token,omitempty"` DiscardPunctuation *bool `json:"discard_punctuation,omitempty"` @@ -180,3 +180,13 @@ func NewKuromojiTokenizer() *KuromojiTokenizer { return r } + +// true + +type KuromojiTokenizerVariant interface { + KuromojiTokenizerCaster() *KuromojiTokenizer +} + +func (s *KuromojiTokenizer) KuromojiTokenizerCaster() *KuromojiTokenizer { + return s +} diff --git a/typedapi/types/languagecontext.go b/typedapi/types/languagecontext.go index ee4dfebdee..59f2a1dce7 100644 --- a/typedapi/types/languagecontext.go +++ b/typedapi/types/languagecontext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // LanguageContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/get_script_languages/types.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/get_script_languages/types.ts#L22-L25 type LanguageContext struct { Contexts []string `json:"contexts"` Language scriptlanguage.ScriptLanguage `json:"language"` @@ -38,3 +38,5 @@ func NewLanguageContext() *LanguageContext { return r } + +// false diff --git a/typedapi/types/laplacesmoothingmodel.go b/typedapi/types/laplacesmoothingmodel.go index fc711fe1bd..063130e32b 100644 --- a/typedapi/types/laplacesmoothingmodel.go +++ b/typedapi/types/laplacesmoothingmodel.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // LaplaceSmoothingModel type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L430-L435 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L430-L435 type LaplaceSmoothingModel struct { // Alpha A constant that is added to all counts to balance weights. 
Alpha Float64 `json:"alpha"` @@ -79,3 +79,13 @@ func NewLaplaceSmoothingModel() *LaplaceSmoothingModel { return r } + +// true + +type LaplaceSmoothingModelVariant interface { + LaplaceSmoothingModelCaster() *LaplaceSmoothingModel +} + +func (s *LaplaceSmoothingModel) LaplaceSmoothingModelCaster() *LaplaceSmoothingModel { + return s +} diff --git a/typedapi/types/latest.go b/typedapi/types/latest.go index afb6c57328..75eaee067e 100644 --- a/typedapi/types/latest.go +++ b/typedapi/types/latest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // Latest type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/_types/Transform.ts#L47-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/_types/Transform.ts#L47-L52 type Latest struct { // Sort Specifies the date field that is used to identify the latest documents. Sort string `json:"sort"` @@ -74,3 +74,13 @@ func NewLatest() *Latest { return r } + +// true + +type LatestVariant interface { + LatestCaster() *Latest +} + +func (s *Latest) LatestCaster() *Latest { + return s +} diff --git a/typedapi/types/latlongeolocation.go b/typedapi/types/latlongeolocation.go index 8d14b01086..db35dff140 100644 --- a/typedapi/types/latlongeolocation.go +++ b/typedapi/types/latlongeolocation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // LatLonGeoLocation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Geo.ts#L120-L129 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Geo.ts#L120-L129 type LatLonGeoLocation struct { // Lat Latitude Lat Float64 `json:"lat"` @@ -97,3 +97,13 @@ func NewLatLonGeoLocation() *LatLonGeoLocation { return r } + +// true + +type LatLonGeoLocationVariant interface { + LatLonGeoLocationCaster() *LatLonGeoLocation +} + +func (s *LatLonGeoLocation) LatLonGeoLocationCaster() *LatLonGeoLocation { + return s +} diff --git a/typedapi/types/latviananalyzer.go b/typedapi/types/latviananalyzer.go new file mode 100644 index 0000000000..1ad7692e67 --- /dev/null +++ b/typedapi/types/latviananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LatvianAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L214-L219 +type LatvianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *LatvianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LatvianAnalyzer) MarshalJSON() ([]byte, 
error) { + type innerLatvianAnalyzer LatvianAnalyzer + tmp := innerLatvianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "latvian" + + return json.Marshal(tmp) +} + +// NewLatvianAnalyzer returns a LatvianAnalyzer. +func NewLatvianAnalyzer() *LatvianAnalyzer { + r := &LatvianAnalyzer{} + + return r +} + +// true + +type LatvianAnalyzerVariant interface { + LatvianAnalyzerCaster() *LatvianAnalyzer +} + +func (s *LatvianAnalyzer) LatvianAnalyzerCaster() *LatvianAnalyzer { + return s +} diff --git a/typedapi/types/learningtorank.go b/typedapi/types/learningtorank.go index 7a124bd193..445ffbaf80 100644 --- a/typedapi/types/learningtorank.go +++ b/typedapi/types/learningtorank.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // LearningToRank type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/rescoring.ts#L88-L97 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/rescoring.ts#L88-L97 type LearningToRank struct { // ModelId The unique identifier of the trained model uploaded to Elasticsearch ModelId string `json:"model_id"` @@ -82,8 +82,18 @@ func (s *LearningToRank) UnmarshalJSON(data []byte) error { // NewLearningToRank returns a LearningToRank. 
func NewLearningToRank() *LearningToRank { r := &LearningToRank{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type LearningToRankVariant interface { + LearningToRankCaster() *LearningToRank +} + +func (s *LearningToRank) LearningToRankCaster() *LearningToRank { + return s +} diff --git a/typedapi/types/lengthtokenfilter.go b/typedapi/types/lengthtokenfilter.go index 7fc02d4887..b9e0011dac 100644 --- a/typedapi/types/lengthtokenfilter.go +++ b/typedapi/types/lengthtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // LengthTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L245-L249 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L244-L248 type LengthTokenFilter struct { Max *int `json:"max,omitempty"` Min *int `json:"min,omitempty"` @@ -122,3 +122,13 @@ func NewLengthTokenFilter() *LengthTokenFilter { return r } + +// true + +type LengthTokenFilterVariant interface { + LengthTokenFilterCaster() *LengthTokenFilter +} + +func (s *LengthTokenFilter) LengthTokenFilterCaster() *LengthTokenFilter { + return s +} diff --git a/typedapi/types/lessthanvalidation.go b/typedapi/types/lessthanvalidation.go index f35ee6542a..e3f197240d 100644 --- a/typedapi/types/lessthanvalidation.go +++ b/typedapi/types/lessthanvalidation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // LessThanValidation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L58-L61 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L58-L61 type LessThanValidation struct { Constraint Float64 `json:"constraint"` Type string `json:"type,omitempty"` @@ -97,3 +97,13 @@ func NewLessThanValidation() *LessThanValidation { return r } + +// true + +type LessThanValidationVariant interface { + LessThanValidationCaster() *LessThanValidation +} + +func (s *LessThanValidation) LessThanValidationCaster() *LessThanValidation { + return s +} diff --git a/typedapi/types/lettertokenizer.go b/typedapi/types/lettertokenizer.go index 5a23bababb..6f757b9e1b 100644 --- a/typedapi/types/lettertokenizer.go +++ b/typedapi/types/lettertokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // LetterTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L67-L69 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L76-L78 type LetterTokenizer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewLetterTokenizer() *LetterTokenizer { return r } + +// true + +type LetterTokenizerVariant interface { + LetterTokenizerCaster() *LetterTokenizer +} + +func (s *LetterTokenizer) LetterTokenizerCaster() *LetterTokenizer { + return s +} diff --git a/typedapi/types/license.go b/typedapi/types/license.go index be3ffc01fd..fbf63822a5 100644 --- a/typedapi/types/license.go +++ b/typedapi/types/license.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // License type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/license/_types/License.ts#L42-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/license/_types/License.ts#L42-L53 type License struct { ExpiryDateInMillis int64 `json:"expiry_date_in_millis"` IssueDateInMillis int64 `json:"issue_date_in_millis"` @@ -161,3 +161,13 @@ func NewLicense() *License { return r } + +// true + +type LicenseVariant interface { + LicenseCaster() *License +} + +func (s *License) LicenseCaster() *License { + return s +} diff --git a/typedapi/types/licenseinformation.go b/typedapi/types/licenseinformation.go index f60599540f..82ceabe5d9 100644 --- a/typedapi/types/licenseinformation.go +++ b/typedapi/types/licenseinformation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // LicenseInformation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/license/get/types.ts#L25-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/license/get/types.ts#L25-L38 type LicenseInformation struct { ExpiryDate DateTime `json:"expiry_date,omitempty"` ExpiryDateInMillis *int64 `json:"expiry_date_in_millis,omitempty"` @@ -150,3 +150,5 @@ func NewLicenseInformation() *LicenseInformation { return r } + +// false diff --git a/typedapi/types/lifecycle.go b/typedapi/types/lifecycle.go index 5d052bdb4f..4b801fe889 100644 --- a/typedapi/types/lifecycle.go +++ b/typedapi/types/lifecycle.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // Lifecycle type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/get_lifecycle/types.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/get_lifecycle/types.ts#L24-L28 type Lifecycle struct { ModifiedDate DateTime `json:"modified_date"` Policy IlmPolicy `json:"policy"` @@ -78,3 +78,5 @@ func NewLifecycle() *Lifecycle { return r } + +// false diff --git a/typedapi/types/lifecycleexplain.go b/typedapi/types/lifecycleexplain.go index 77f4104575..f7593f4f1a 100644 --- a/typedapi/types/lifecycleexplain.go +++ b/typedapi/types/lifecycleexplain.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // LifecycleExplainManaged // LifecycleExplainUnmanaged // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/explain_lifecycle/types.ts#L59-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/explain_lifecycle/types.ts#L64-L67 type LifecycleExplain any diff --git a/typedapi/types/lifecycleexplainmanaged.go b/typedapi/types/lifecycleexplainmanaged.go index 54a1ae6386..4186864dcc 100644 --- a/typedapi/types/lifecycleexplainmanaged.go +++ b/typedapi/types/lifecycleexplainmanaged.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // LifecycleExplainManaged type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/explain_lifecycle/types.ts#L26-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/explain_lifecycle/types.ts#L27-L57 type LifecycleExplainManaged struct { Action *string `json:"action,omitempty"` ActionTime DateTime `json:"action_time,omitempty"` @@ -39,7 +39,7 @@ type LifecycleExplainManaged struct { Age Duration `json:"age,omitempty"` FailedStep *string `json:"failed_step,omitempty"` FailedStepRetryCount *int `json:"failed_step_retry_count,omitempty"` - Index *string `json:"index,omitempty"` + Index string `json:"index"` IndexCreationDate DateTime `json:"index_creation_date,omitempty"` IndexCreationDateMillis *int64 `json:"index_creation_date_millis,omitempty"` IsAutoRetryableError *bool `json:"is_auto_retryable_error,omitempty"` @@ -50,7 +50,11 @@ type LifecycleExplainManaged struct { PhaseExecution *LifecycleExplainPhaseExecution `json:"phase_execution,omitempty"` PhaseTime DateTime `json:"phase_time,omitempty"` PhaseTimeMillis *int64 `json:"phase_time_millis,omitempty"` - Policy string `json:"policy"` + Policy *string `json:"policy,omitempty"` + PreviousStepInfo map[string]json.RawMessage `json:"previous_step_info,omitempty"` + RepositoryName *string `json:"repository_name,omitempty"` + ShrinkIndexName *string `json:"shrink_index_name,omitempty"` + SnapshotName *string `json:"snapshot_name,omitempty"` Step *string `json:"step,omitempty"` StepInfo map[string]json.RawMessage `json:"step_info,omitempty"` StepTime DateTime `json:"step_time,omitempty"` @@ -183,6 +187,50 @@ func (s *LifecycleExplainManaged) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Policy", err) } + case "previous_step_info": + if s.PreviousStepInfo == nil { + s.PreviousStepInfo = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.PreviousStepInfo); err != nil { + 
return fmt.Errorf("%s | %w", "PreviousStepInfo", err) + } + + case "repository_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RepositoryName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RepositoryName = &o + + case "shrink_index_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ShrinkIndexName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ShrinkIndexName = &o + + case "snapshot_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SnapshotName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SnapshotName = &o + case "step": if err := dec.Decode(&s.Step); err != nil { return fmt.Errorf("%s | %w", "Step", err) @@ -238,6 +286,10 @@ func (s LifecycleExplainManaged) MarshalJSON() ([]byte, error) { PhaseTime: s.PhaseTime, PhaseTimeMillis: s.PhaseTimeMillis, Policy: s.Policy, + PreviousStepInfo: s.PreviousStepInfo, + RepositoryName: s.RepositoryName, + ShrinkIndexName: s.ShrinkIndexName, + SnapshotName: s.SnapshotName, Step: s.Step, StepInfo: s.StepInfo, StepTime: s.StepTime, @@ -253,8 +305,11 @@ func (s LifecycleExplainManaged) MarshalJSON() ([]byte, error) { // NewLifecycleExplainManaged returns a LifecycleExplainManaged. 
func NewLifecycleExplainManaged() *LifecycleExplainManaged { r := &LifecycleExplainManaged{ - StepInfo: make(map[string]json.RawMessage, 0), + PreviousStepInfo: make(map[string]json.RawMessage), + StepInfo: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/lifecycleexplainphaseexecution.go b/typedapi/types/lifecycleexplainphaseexecution.go index 979d8e9f58..76c951e51e 100644 --- a/typedapi/types/lifecycleexplainphaseexecution.go +++ b/typedapi/types/lifecycleexplainphaseexecution.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,9 +30,10 @@ import ( // LifecycleExplainPhaseExecution type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/explain_lifecycle/types.ts#L64-L68 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/explain_lifecycle/types.ts#L69-L74 type LifecycleExplainPhaseExecution struct { ModifiedDateInMillis int64 `json:"modified_date_in_millis"` + PhaseDefinition *Phase `json:"phase_definition,omitempty"` Policy string `json:"policy"` Version int64 `json:"version"` } @@ -57,6 +58,11 @@ func (s *LifecycleExplainPhaseExecution) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "ModifiedDateInMillis", err) } + case "phase_definition": + if err := dec.Decode(&s.PhaseDefinition); err != nil { + return fmt.Errorf("%s | %w", "PhaseDefinition", err) + } + case "policy": if err := dec.Decode(&s.Policy); err != nil { return fmt.Errorf("%s | %w", "Policy", err) @@ -78,3 +84,5 @@ func NewLifecycleExplainPhaseExecution() *LifecycleExplainPhaseExecution { return r } + +// false diff 
--git a/typedapi/types/lifecycleexplainunmanaged.go b/typedapi/types/lifecycleexplainunmanaged.go index e13392e8e5..884d17d3df 100644 --- a/typedapi/types/lifecycleexplainunmanaged.go +++ b/typedapi/types/lifecycleexplainunmanaged.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // LifecycleExplainUnmanaged type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/explain_lifecycle/types.ts#L54-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/explain_lifecycle/types.ts#L59-L62 type LifecycleExplainUnmanaged struct { Index string `json:"index"` Managed bool `json:"managed,omitempty"` @@ -85,3 +85,5 @@ func NewLifecycleExplainUnmanaged() *LifecycleExplainUnmanaged { return r } + +// false diff --git a/typedapi/types/like.go b/typedapi/types/like.go index 91732d6341..fed8ca4db0 100644 --- a/typedapi/types/like.go +++ b/typedapi/types/like.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // LikeDocument // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L195-L200 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L198-L203 type Like any + +type LikeVariant interface { + LikeCaster() *Like +} diff --git a/typedapi/types/likedocument.go b/typedapi/types/likedocument.go index 9c3a89a124..754ae7600b 100644 --- a/typedapi/types/likedocument.go +++ b/typedapi/types/likedocument.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // LikeDocument type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L171-L193 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L174-L196 type LikeDocument struct { // Doc A document not present in the index. Doc json.RawMessage `json:"doc,omitempty"` @@ -114,8 +114,18 @@ func (s *LikeDocument) UnmarshalJSON(data []byte) error { // NewLikeDocument returns a LikeDocument. 
func NewLikeDocument() *LikeDocument { r := &LikeDocument{ - PerFieldAnalyzer: make(map[string]string, 0), + PerFieldAnalyzer: make(map[string]string), } return r } + +// true + +type LikeDocumentVariant interface { + LikeDocumentCaster() *LikeDocument +} + +func (s *LikeDocument) LikeDocumentCaster() *LikeDocument { + return s +} diff --git a/typedapi/types/limits.go b/typedapi/types/limits.go index d96503bc49..a1e2d66fb2 100644 --- a/typedapi/types/limits.go +++ b/typedapi/types/limits.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,11 +31,13 @@ import ( // Limits type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/info/types.ts#L34-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/info/types.ts#L34-L40 type Limits struct { - EffectiveMaxModelMemoryLimit string `json:"effective_max_model_memory_limit"` - MaxModelMemoryLimit *string `json:"max_model_memory_limit,omitempty"` - TotalMlMemory string `json:"total_ml_memory"` + EffectiveMaxModelMemoryLimit ByteSize `json:"effective_max_model_memory_limit,omitempty"` + MaxModelMemoryLimit ByteSize `json:"max_model_memory_limit,omitempty"` + MaxSingleMlNodeProcessors *int `json:"max_single_ml_node_processors,omitempty"` + TotalMlMemory ByteSize `json:"total_ml_memory"` + TotalMlProcessors *int `json:"total_ml_processors,omitempty"` } func (s *Limits) UnmarshalJSON(data []byte) error { @@ -54,40 +56,51 @@ func (s *Limits) UnmarshalJSON(data []byte) error { switch t { case "effective_max_model_memory_limit": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if 
err := dec.Decode(&s.EffectiveMaxModelMemoryLimit); err != nil { return fmt.Errorf("%s | %w", "EffectiveMaxModelMemoryLimit", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.EffectiveMaxModelMemoryLimit = o case "max_model_memory_limit": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.MaxModelMemoryLimit); err != nil { return fmt.Errorf("%s | %w", "MaxModelMemoryLimit", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) + + case "max_single_ml_node_processors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxSingleMlNodeProcessors", err) + } + s.MaxSingleMlNodeProcessors = &value + case float64: + f := int(v) + s.MaxSingleMlNodeProcessors = &f } - s.MaxModelMemoryLimit = &o case "total_ml_memory": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.TotalMlMemory); err != nil { return fmt.Errorf("%s | %w", "TotalMlMemory", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) + + case "total_ml_processors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalMlProcessors", err) + } + s.TotalMlProcessors = &value + case float64: + f := int(v) + s.TotalMlProcessors = &f } - s.TotalMlMemory = o } } @@ -100,3 +113,5 @@ func NewLimits() *Limits { return r } + +// false diff --git a/typedapi/types/limittokencounttokenfilter.go b/typedapi/types/limittokencounttokenfilter.go index 6ac17a2630..c0b0949720 100644 --- a/typedapi/types/limittokencounttokenfilter.go +++ b/typedapi/types/limittokencounttokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // LimitTokenCountTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L251-L255 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L250-L254 type LimitTokenCountTokenFilter struct { ConsumeAllTokens *bool `json:"consume_all_tokens,omitempty"` MaxTokenCount Stringifiedinteger `json:"max_token_count,omitempty"` @@ -109,3 +109,13 @@ func NewLimitTokenCountTokenFilter() *LimitTokenCountTokenFilter { return r } + +// true + +type LimitTokenCountTokenFilterVariant interface { + LimitTokenCountTokenFilterCaster() *LimitTokenCountTokenFilter +} + +func (s *LimitTokenCountTokenFilter) LimitTokenCountTokenFilterCaster() *LimitTokenCountTokenFilter { + return s +} diff --git a/typedapi/types/linearinterpolationsmoothingmodel.go b/typedapi/types/linearinterpolationsmoothingmodel.go index 682b0a0023..f48bfe0aaa 100644 --- a/typedapi/types/linearinterpolationsmoothingmodel.go +++ b/typedapi/types/linearinterpolationsmoothingmodel.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // LinearInterpolationSmoothingModel type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L437-L441 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L437-L441 type LinearInterpolationSmoothingModel struct { BigramLambda Float64 `json:"bigram_lambda"` TrigramLambda Float64 `json:"trigram_lambda"` @@ -112,3 +112,13 @@ func NewLinearInterpolationSmoothingModel() *LinearInterpolationSmoothingModel { return r } + +// true + +type LinearInterpolationSmoothingModelVariant interface { + LinearInterpolationSmoothingModelCaster() *LinearInterpolationSmoothingModel +} + +func (s *LinearInterpolationSmoothingModel) LinearInterpolationSmoothingModelCaster() *LinearInterpolationSmoothingModel { + return s +} diff --git a/typedapi/types/linearmovingaverageaggregation.go b/typedapi/types/linearmovingaverageaggregation.go index b61113b0bb..63822fe5cd 100644 --- a/typedapi/types/linearmovingaverageaggregation.go +++ b/typedapi/types/linearmovingaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // LinearMovingAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L242-L245 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L268-L271 type LinearMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -173,3 +173,13 @@ func NewLinearMovingAverageAggregation() *LinearMovingAverageAggregation { return r } + +// true + +type LinearMovingAverageAggregationVariant interface { + LinearMovingAverageAggregationCaster() *LinearMovingAverageAggregation +} + +func (s *LinearMovingAverageAggregation) LinearMovingAverageAggregationCaster() *LinearMovingAverageAggregation { + return s +} diff --git a/typedapi/types/listtypevalidation.go b/typedapi/types/listtypevalidation.go index 3a4dcb0646..833f20b0f3 100644 --- a/typedapi/types/listtypevalidation.go +++ b/typedapi/types/listtypevalidation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ListTypeValidation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L68-L71 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L68-L71 type ListTypeValidation struct { Constraint string `json:"constraint"` Type string `json:"type,omitempty"` @@ -93,3 +93,13 @@ func NewListTypeValidation() *ListTypeValidation { return r } + +// true + +type ListTypeValidationVariant interface { + ListTypeValidationCaster() *ListTypeValidation +} + +func (s *ListTypeValidation) ListTypeValidationCaster() *ListTypeValidation { + return s +} diff --git a/typedapi/types/lithuaniananalyzer.go b/typedapi/types/lithuaniananalyzer.go new file mode 100644 index 0000000000..9a99327f84 --- /dev/null +++ b/typedapi/types/lithuaniananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LithuanianAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L221-L226 +type LithuanianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *LithuanianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LithuanianAnalyzer) MarshalJSON() ([]byte, error) { + type innerLithuanianAnalyzer LithuanianAnalyzer + tmp := innerLithuanianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = 
"lithuanian" + + return json.Marshal(tmp) +} + +// NewLithuanianAnalyzer returns a LithuanianAnalyzer. +func NewLithuanianAnalyzer() *LithuanianAnalyzer { + r := &LithuanianAnalyzer{} + + return r +} + +// true + +type LithuanianAnalyzerVariant interface { + LithuanianAnalyzerCaster() *LithuanianAnalyzer +} + +func (s *LithuanianAnalyzer) LithuanianAnalyzerCaster() *LithuanianAnalyzer { + return s +} diff --git a/typedapi/types/helprecord.go b/typedapi/types/local.go similarity index 70% rename from typedapi/types/helprecord.go rename to typedapi/types/local.go index 37adb37c13..f8b519baa2 100644 --- a/typedapi/types/helprecord.go +++ b/typedapi/types/local.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,14 +29,14 @@ import ( "strconv" ) -// HelpRecord type. +// Local type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/help/types.ts#L20-L22 -type HelpRecord struct { - Endpoint string `json:"endpoint"` +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Database.ts#L63-L65 +type Local struct { + Type string `json:"type"` } -func (s *HelpRecord) UnmarshalJSON(data []byte) error { +func (s *Local) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -51,26 +51,28 @@ func (s *HelpRecord) UnmarshalJSON(data []byte) error { switch t { - case "endpoint": + case "type": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Endpoint", err) + return fmt.Errorf("%s | %w", "Type", err) } o := string(tmp[:]) o, err = strconv.Unquote(o) if err != nil { o = string(tmp[:]) } - s.Endpoint = o + s.Type = o } } return nil } -// NewHelpRecord returns a HelpRecord. -func NewHelpRecord() *HelpRecord { - r := &HelpRecord{} +// NewLocal returns a Local. +func NewLocal() *Local { + r := &Local{} return r } + +// false diff --git a/typedapi/types/loggingaction.go b/typedapi/types/loggingaction.go index 3d7a7c8bcb..9fa2c2915d 100644 --- a/typedapi/types/loggingaction.go +++ b/typedapi/types/loggingaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // LoggingAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L281-L285 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L281-L285 type LoggingAction struct { Category *string `json:"category,omitempty"` Level *string `json:"level,omitempty"` @@ -100,3 +100,13 @@ func NewLoggingAction() *LoggingAction { return r } + +// true + +type LoggingActionVariant interface { + LoggingActionCaster() *LoggingAction +} + +func (s *LoggingAction) LoggingActionCaster() *LoggingAction { + return s +} diff --git a/typedapi/types/loggingresult.go b/typedapi/types/loggingresult.go index c2023c8c33..69638a37a5 100644 --- a/typedapi/types/loggingresult.go +++ b/typedapi/types/loggingresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // LoggingResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L287-L289 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L287-L289 type LoggingResult struct { LoggedText string `json:"logged_text"` } @@ -74,3 +74,5 @@ func NewLoggingResult() *LoggingResult { return r } + +// false diff --git a/typedapi/types/logstashpipeline.go b/typedapi/types/logstashpipeline.go index 7c3bbc4790..981e42371a 100644 --- a/typedapi/types/logstashpipeline.go +++ b/typedapi/types/logstashpipeline.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,24 +31,23 @@ import ( // LogstashPipeline type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/logstash/_types/Pipeline.ts#L60-L92 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/logstash/_types/Pipeline.ts#L60-L91 type LogstashPipeline struct { - // Description Description of the pipeline. + // Description A description of the pipeline. // This description is not used by Elasticsearch or Logstash. Description string `json:"description"` - // LastModified Date the pipeline was last updated. - // Must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. + // LastModified The date the pipeline was last updated. + // It must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. LastModified DateTime `json:"last_modified"` - // Pipeline Configuration for the pipeline. + // Pipeline The configuration for the pipeline. Pipeline string `json:"pipeline"` - // PipelineMetadata Optional metadata about the pipeline. - // May have any contents. + // PipelineMetadata Optional metadata about the pipeline, which can have any contents. // This metadata is not generated or used by Elasticsearch or Logstash. PipelineMetadata PipelineMetadata `json:"pipeline_metadata"` // PipelineSettings Settings for the pipeline. - // Supports only flat keys in dot notation. + // It supports only flat keys in dot notation. PipelineSettings PipelineSettings `json:"pipeline_settings"` - // Username User who last updated the pipeline. + // Username The user who last updated the pipeline. 
Username string `json:"username"` } @@ -129,3 +128,13 @@ func NewLogstashPipeline() *LogstashPipeline { return r } + +// true + +type LogstashPipelineVariant interface { + LogstashPipelineCaster() *LogstashPipeline +} + +func (s *LogstashPipeline) LogstashPipelineCaster() *LogstashPipeline { + return s +} diff --git a/typedapi/types/longnumberproperty.go b/typedapi/types/longnumberproperty.go index c76182e005..517c768bdc 100644 --- a/typedapi/types/longnumberproperty.go +++ b/typedapi/types/longnumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // LongNumberProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L162-L165 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L166-L169 type LongNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,13 +48,13 @@ type LongNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - NullValue *int64 `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *int64 `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -162,301 +163,313 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if 
err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -553,301 +566,313 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := 
NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -858,18 +883,6 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Script", err) } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -884,6 +897,11 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -931,8 +949,8 @@ func (s LongNumberProperty) MarshalJSON() ([]byte, error) { OnScriptError: s.OnScriptError, Properties: s.Properties, Script: s.Script, - Similarity: s.Similarity, Store: s.Store, + 
SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -946,10 +964,20 @@ func (s LongNumberProperty) MarshalJSON() ([]byte, error) { // NewLongNumberProperty returns a LongNumberProperty. func NewLongNumberProperty() *LongNumberProperty { r := &LongNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type LongNumberPropertyVariant interface { + LongNumberPropertyCaster() *LongNumberProperty +} + +func (s *LongNumberProperty) LongNumberPropertyCaster() *LongNumberProperty { + return s +} diff --git a/typedapi/types/longrangeproperty.go b/typedapi/types/longrangeproperty.go index e16da2f72b..7225907486 100644 --- a/typedapi/types/longrangeproperty.go +++ b/typedapi/types/longrangeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // LongRangeProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/range.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/range.ts#L50-L52 type LongRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -44,11 +45,11 @@ type LongRangeProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { @@ -150,301 +151,313 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo 
:= NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } 
s.Fields[key] = oo } @@ -507,318 +520,318 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", 
err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := 
NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -833,6 +846,11 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -847,19 +865,19 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { func (s 
LongRangeProperty) MarshalJSON() ([]byte, error) { type innerLongRangeProperty LongRangeProperty tmp := innerLongRangeProperty{ - Boost: s.Boost, - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Index: s.Index, - Meta: s.Meta, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "long_range" @@ -870,10 +888,20 @@ func (s LongRangeProperty) MarshalJSON() ([]byte, error) { // NewLongRangeProperty returns a LongRangeProperty. func NewLongRangeProperty() *LongRangeProperty { r := &LongRangeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type LongRangePropertyVariant interface { + LongRangePropertyCaster() *LongRangeProperty +} + +func (s *LongRangeProperty) LongRangePropertyCaster() *LongRangeProperty { + return s +} diff --git a/typedapi/types/longraretermsaggregate.go b/typedapi/types/longraretermsaggregate.go index 3f9bfed6f2..281a3ff31f 100644 --- a/typedapi/types/longraretermsaggregate.go +++ b/typedapi/types/longraretermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // LongRareTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L433-L438 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L471-L476 type LongRareTermsAggregate struct { Buckets BucketsLongRareTermsBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewLongRareTermsAggregate() *LongRareTermsAggregate { return r } + +// false diff --git a/typedapi/types/longraretermsbucket.go b/typedapi/types/longraretermsbucket.go index 5136d4e785..18e27de733 100644 --- a/typedapi/types/longraretermsbucket.go +++ b/typedapi/types/longraretermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // LongRareTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L440-L443 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L478-L481 type LongRareTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -514,6 +514,13 @@ func (s *LongRareTermsBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -647,8 +654,10 @@ func (s LongRareTermsBucket) MarshalJSON() ([]byte, error) { // NewLongRareTermsBucket returns a LongRareTermsBucket. func NewLongRareTermsBucket() *LongRareTermsBucket { r := &LongRareTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/longtermsaggregate.go b/typedapi/types/longtermsaggregate.go index 15acd795ea..df5fe76485 100644 --- a/typedapi/types/longtermsaggregate.go +++ b/typedapi/types/longtermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // LongTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L401-L406 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L439-L444 type LongTermsAggregate struct { Buckets BucketsLongTermsBucket `json:"buckets"` DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` @@ -121,3 +121,5 @@ func NewLongTermsAggregate() *LongTermsAggregate { return r } + +// false diff --git a/typedapi/types/longtermsbucket.go b/typedapi/types/longtermsbucket.go index 53dbb0868a..59965ee1ac 100644 --- a/typedapi/types/longtermsbucket.go +++ b/typedapi/types/longtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // LongTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L408-L411 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L446-L449 type LongTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -530,6 +530,13 @@ func (s *LongTermsBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -663,8 +670,10 @@ func (s LongTermsBucket) MarshalJSON() ([]byte, error) { // NewLongTermsBucket returns a LongTermsBucket. func NewLongTermsBucket() *LongTermsBucket { r := &LongTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/lowercasenormalizer.go b/typedapi/types/lowercasenormalizer.go index a1d5f9d4a4..720b7bb481 100644 --- a/typedapi/types/lowercasenormalizer.go +++ b/typedapi/types/lowercasenormalizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // LowercaseNormalizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/normalizers.ts#L26-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/normalizers.ts#L26-L28 type LowercaseNormalizer struct { Type string `json:"type,omitempty"` } @@ -49,3 +49,13 @@ func NewLowercaseNormalizer() *LowercaseNormalizer { return r } + +// true + +type LowercaseNormalizerVariant interface { + LowercaseNormalizerCaster() *LowercaseNormalizer +} + +func (s *LowercaseNormalizer) LowercaseNormalizerCaster() *LowercaseNormalizer { + return s +} diff --git a/typedapi/types/lowercaseprocessor.go b/typedapi/types/lowercaseprocessor.go index 25eab4093b..19c7fb491d 100644 --- a/typedapi/types/lowercaseprocessor.go +++ b/typedapi/types/lowercaseprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // LowercaseProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L910-L926 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1212-L1228 type LowercaseProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. 
@@ -160,3 +160,13 @@ func NewLowercaseProcessor() *LowercaseProcessor { return r } + +// true + +type LowercaseProcessorVariant interface { + LowercaseProcessorCaster() *LowercaseProcessor +} + +func (s *LowercaseProcessor) LowercaseProcessorCaster() *LowercaseProcessor { + return s +} diff --git a/typedapi/types/lowercasetokenfilter.go b/typedapi/types/lowercasetokenfilter.go index f9dfdfdda7..4d1e564732 100644 --- a/typedapi/types/lowercasetokenfilter.go +++ b/typedapi/types/lowercasetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // LowercaseTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L257-L260 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L256-L259 type LowercaseTokenFilter struct { Language *string `json:"language,omitempty"` Type string `json:"type,omitempty"` @@ -100,3 +100,13 @@ func NewLowercaseTokenFilter() *LowercaseTokenFilter { return r } + +// true + +type LowercaseTokenFilterVariant interface { + LowercaseTokenFilterCaster() *LowercaseTokenFilter +} + +func (s *LowercaseTokenFilter) LowercaseTokenFilterCaster() *LowercaseTokenFilter { + return s +} diff --git a/typedapi/types/lowercasetokenizer.go b/typedapi/types/lowercasetokenizer.go index 1e99cc9399..c74f520429 100644 --- a/typedapi/types/lowercasetokenizer.go +++ b/typedapi/types/lowercasetokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // LowercaseTokenizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L71-L73 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L80-L82 type LowercaseTokenizer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewLowercaseTokenizer() *LowercaseTokenizer { return r } + +// true + +type LowercaseTokenizerVariant interface { + LowercaseTokenizerCaster() *LowercaseTokenizer +} + +func (s *LowercaseTokenizer) LowercaseTokenizerCaster() *LowercaseTokenizer { + return s +} diff --git a/typedapi/types/machinelearning.go b/typedapi/types/machinelearning.go index f6a9ce1872..46ac49a2bd 100644 --- a/typedapi/types/machinelearning.go +++ b/typedapi/types/machinelearning.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MachineLearning type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L372-L379 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L382-L389 type MachineLearning struct { Available bool `json:"available"` DataFrameAnalyticsJobs MlDataFrameAnalyticsJobs `json:"data_frame_analytics_jobs"` @@ -137,9 +137,11 @@ func (s *MachineLearning) UnmarshalJSON(data []byte) error { // NewMachineLearning returns a MachineLearning. func NewMachineLearning() *MachineLearning { r := &MachineLearning{ - Datafeeds: make(map[string]XpackDatafeed, 0), - Jobs: make(map[string]JobUsage, 0), + Datafeeds: make(map[string]XpackDatafeed), + Jobs: make(map[string]JobUsage), } return r } + +// false diff --git a/typedapi/types/manageuserprivileges.go b/typedapi/types/manageuserprivileges.go index 76346dc199..950ba116ca 100644 --- a/typedapi/types/manageuserprivileges.go +++ b/typedapi/types/manageuserprivileges.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ManageUserPrivileges type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/Privileges.ts#L344-L346 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L438-L440 type ManageUserPrivileges struct { Applications []string `json:"applications"` } @@ -33,3 +33,13 @@ func NewManageUserPrivileges() *ManageUserPrivileges { return r } + +// true + +type ManageUserPrivilegesVariant interface { + ManageUserPrivilegesCaster() *ManageUserPrivileges +} + +func (s *ManageUserPrivileges) ManageUserPrivilegesCaster() *ManageUserPrivileges { + return s +} diff --git a/typedapi/types/mapboxvectortiles.go b/typedapi/types/mapboxvectortiles.go index 1d665721d6..a79c25c615 100644 --- a/typedapi/types/mapboxvectortiles.go +++ b/typedapi/types/mapboxvectortiles.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // MapboxVectorTiles type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Binary.ts#L21-L21 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Binary.ts#L21-L21 type MapboxVectorTiles []byte diff --git a/typedapi/types/mappingcharfilter.go b/typedapi/types/mappingcharfilter.go index 80772b47b5..0807689104 100644 --- a/typedapi/types/mappingcharfilter.go +++ b/typedapi/types/mappingcharfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MappingCharFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/char_filters.ts#L48-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/char_filters.ts#L51-L55 type MappingCharFilter struct { Mappings []string `json:"mappings,omitempty"` MappingsPath *string `json:"mappings_path,omitempty"` @@ -107,3 +107,13 @@ func NewMappingCharFilter() *MappingCharFilter { return r } + +// true + +type MappingCharFilterVariant interface { + MappingCharFilterCaster() *MappingCharFilter +} + +func (s *MappingCharFilter) MappingCharFilterCaster() *MappingCharFilter { + return s +} diff --git a/typedapi/types/mappinglimitsettings.go b/typedapi/types/mappinglimitsettings.go index 2cc31e98d5..d439913905 100644 --- a/typedapi/types/mappinglimitsettings.go +++ b/typedapi/types/mappinglimitsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,15 +31,16 @@ import ( // MappingLimitSettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L411-L424 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L425-L439 type MappingLimitSettings struct { Coerce *bool `json:"coerce,omitempty"` Depth *MappingLimitSettingsDepth `json:"depth,omitempty"` DimensionFields *MappingLimitSettingsDimensionFields `json:"dimension_fields,omitempty"` FieldNameLength *MappingLimitSettingsFieldNameLength `json:"field_name_length,omitempty"` - IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + IgnoreMalformed string `json:"ignore_malformed,omitempty"` NestedFields *MappingLimitSettingsNestedFields `json:"nested_fields,omitempty"` NestedObjects *MappingLimitSettingsNestedObjects `json:"nested_objects,omitempty"` + Source *MappingLimitSettingsSourceFields `json:"source,omitempty"` TotalFields *MappingLimitSettingsTotalFields `json:"total_fields,omitempty"` } @@ -88,18 +89,16 @@ func (s *MappingLimitSettings) UnmarshalJSON(data []byte) error { } case "ignore_malformed": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "IgnoreMalformed", err) - } - s.IgnoreMalformed = &value - case bool: - s.IgnoreMalformed = &v + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IgnoreMalformed = o case "nested_fields": if err := dec.Decode(&s.NestedFields); err != nil { @@ -111,6 +110,11 @@ func (s *MappingLimitSettings) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "NestedObjects", err) } + case "source": + if err := dec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", 
"Source", err) + } + case "total_fields": if err := dec.Decode(&s.TotalFields); err != nil { return fmt.Errorf("%s | %w", "TotalFields", err) @@ -127,3 +131,13 @@ func NewMappingLimitSettings() *MappingLimitSettings { return r } + +// true + +type MappingLimitSettingsVariant interface { + MappingLimitSettingsCaster() *MappingLimitSettings +} + +func (s *MappingLimitSettings) MappingLimitSettingsCaster() *MappingLimitSettings { + return s +} diff --git a/typedapi/types/mappinglimitsettingsdepth.go b/typedapi/types/mappinglimitsettingsdepth.go index af60c81c99..25b0e837a2 100644 --- a/typedapi/types/mappinglimitsettingsdepth.go +++ b/typedapi/types/mappinglimitsettingsdepth.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MappingLimitSettingsDepth type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L445-L452 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L460-L467 type MappingLimitSettingsDepth struct { // Limit The maximum depth for a field, which is measured as the number of inner // objects. 
For instance, if all fields are defined @@ -81,3 +81,13 @@ func NewMappingLimitSettingsDepth() *MappingLimitSettingsDepth { return r } + +// true + +type MappingLimitSettingsDepthVariant interface { + MappingLimitSettingsDepthCaster() *MappingLimitSettingsDepth +} + +func (s *MappingLimitSettingsDepth) MappingLimitSettingsDepthCaster() *MappingLimitSettingsDepth { + return s +} diff --git a/typedapi/types/mappinglimitsettingsdimensionfields.go b/typedapi/types/mappinglimitsettingsdimensionfields.go index 57d0cba136..8e8f8b1936 100644 --- a/typedapi/types/mappinglimitsettingsdimensionfields.go +++ b/typedapi/types/mappinglimitsettingsdimensionfields.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MappingLimitSettingsDimensionFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L482-L488 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L497-L503 type MappingLimitSettingsDimensionFields struct { // Limit [preview] This functionality is in technical preview and may be changed or // removed in a future release. 
@@ -81,3 +81,13 @@ func NewMappingLimitSettingsDimensionFields() *MappingLimitSettingsDimensionFiel return r } + +// true + +type MappingLimitSettingsDimensionFieldsVariant interface { + MappingLimitSettingsDimensionFieldsCaster() *MappingLimitSettingsDimensionFields +} + +func (s *MappingLimitSettingsDimensionFields) MappingLimitSettingsDimensionFieldsCaster() *MappingLimitSettingsDimensionFields { + return s +} diff --git a/typedapi/types/mappinglimitsettingsfieldnamelength.go b/typedapi/types/mappinglimitsettingsfieldnamelength.go index 9feb787133..9158944e8f 100644 --- a/typedapi/types/mappinglimitsettingsfieldnamelength.go +++ b/typedapi/types/mappinglimitsettingsfieldnamelength.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MappingLimitSettingsFieldNameLength type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L473-L480 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L488-L495 type MappingLimitSettingsFieldNameLength struct { // Limit Setting for the maximum length of a field name. 
This setting isn’t really // something that addresses mappings explosion but @@ -83,3 +83,13 @@ func NewMappingLimitSettingsFieldNameLength() *MappingLimitSettingsFieldNameLeng return r } + +// true + +type MappingLimitSettingsFieldNameLengthVariant interface { + MappingLimitSettingsFieldNameLengthCaster() *MappingLimitSettingsFieldNameLength +} + +func (s *MappingLimitSettingsFieldNameLength) MappingLimitSettingsFieldNameLengthCaster() *MappingLimitSettingsFieldNameLength { + return s +} diff --git a/typedapi/types/mappinglimitsettingsnestedfields.go b/typedapi/types/mappinglimitsettingsnestedfields.go index 8d397c4ba5..59bf80d17a 100644 --- a/typedapi/types/mappinglimitsettingsnestedfields.go +++ b/typedapi/types/mappinglimitsettingsnestedfields.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MappingLimitSettingsNestedFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L454-L462 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L469-L477 type MappingLimitSettingsNestedFields struct { // Limit The maximum number of distinct nested mappings in an index. 
The nested type // should only be used in special cases, when @@ -82,3 +82,13 @@ func NewMappingLimitSettingsNestedFields() *MappingLimitSettingsNestedFields { return r } + +// true + +type MappingLimitSettingsNestedFieldsVariant interface { + MappingLimitSettingsNestedFieldsCaster() *MappingLimitSettingsNestedFields +} + +func (s *MappingLimitSettingsNestedFields) MappingLimitSettingsNestedFieldsCaster() *MappingLimitSettingsNestedFields { + return s +} diff --git a/typedapi/types/mappinglimitsettingsnestedobjects.go b/typedapi/types/mappinglimitsettingsnestedobjects.go index dd0ad5a2c7..6fbc050537 100644 --- a/typedapi/types/mappinglimitsettingsnestedobjects.go +++ b/typedapi/types/mappinglimitsettingsnestedobjects.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MappingLimitSettingsNestedObjects type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L464-L471 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L479-L486 type MappingLimitSettingsNestedObjects struct { // Limit The maximum number of nested JSON objects that a single document can contain // across all nested types. 
This limit helps @@ -81,3 +81,13 @@ func NewMappingLimitSettingsNestedObjects() *MappingLimitSettingsNestedObjects { return r } + +// true + +type MappingLimitSettingsNestedObjectsVariant interface { + MappingLimitSettingsNestedObjectsCaster() *MappingLimitSettingsNestedObjects +} + +func (s *MappingLimitSettingsNestedObjects) MappingLimitSettingsNestedObjectsCaster() *MappingLimitSettingsNestedObjects { + return s +} diff --git a/typedapi/types/mappinglimitsettingssourcefields.go b/typedapi/types/mappinglimitsettingssourcefields.go new file mode 100644 index 0000000000..e9f7b3908a --- /dev/null +++ b/typedapi/types/mappinglimitsettingssourcefields.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sourcemode" +) + +// MappingLimitSettingsSourceFields type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L505-L507 +type MappingLimitSettingsSourceFields struct { + Mode sourcemode.SourceMode `json:"mode"` +} + +// NewMappingLimitSettingsSourceFields returns a MappingLimitSettingsSourceFields. +func NewMappingLimitSettingsSourceFields() *MappingLimitSettingsSourceFields { + r := &MappingLimitSettingsSourceFields{} + + return r +} + +// true + +type MappingLimitSettingsSourceFieldsVariant interface { + MappingLimitSettingsSourceFieldsCaster() *MappingLimitSettingsSourceFields +} + +func (s *MappingLimitSettingsSourceFields) MappingLimitSettingsSourceFieldsCaster() *MappingLimitSettingsSourceFields { + return s +} diff --git a/typedapi/types/mappinglimitsettingstotalfields.go b/typedapi/types/mappinglimitsettingstotalfields.go index 5b979c1100..d7d92051c3 100644 --- a/typedapi/types/mappinglimitsettingstotalfields.go +++ b/typedapi/types/mappinglimitsettingstotalfields.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MappingLimitSettingsTotalFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L426-L443 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L441-L458 type MappingLimitSettingsTotalFields struct { // IgnoreDynamicBeyondLimit This setting determines what happens when a dynamically mapped field would // exceed the total fields limit. 
When set @@ -43,14 +43,14 @@ type MappingLimitSettingsTotalFields struct { // similar to dynamic: false. // The fields that were not added to the mapping will be added to the _ignored // field. - IgnoreDynamicBeyondLimit *bool `json:"ignore_dynamic_beyond_limit,omitempty"` + IgnoreDynamicBeyondLimit string `json:"ignore_dynamic_beyond_limit,omitempty"` // Limit The maximum number of fields in an index. Field and object mappings, as well // as field aliases count towards this limit. // The limit is in place to prevent mappings and searches from becoming too // large. Higher values can lead to performance // degradations and memory issues, especially in clusters with a high load or // few resources. - Limit *int64 `json:"limit,omitempty"` + Limit string `json:"limit,omitempty"` } func (s *MappingLimitSettingsTotalFields) UnmarshalJSON(data []byte) error { @@ -69,33 +69,28 @@ func (s *MappingLimitSettingsTotalFields) UnmarshalJSON(data []byte) error { switch t { case "ignore_dynamic_beyond_limit": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "IgnoreDynamicBeyondLimit", err) - } - s.IgnoreDynamicBeyondLimit = &value - case bool: - s.IgnoreDynamicBeyondLimit = &v + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IgnoreDynamicBeyondLimit", err) } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IgnoreDynamicBeyondLimit = o case "limit": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Limit", err) - } - s.Limit = &value - case float64: - f := int64(v) - s.Limit = &f + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Limit", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if 
err != nil { + o = string(tmp[:]) } + s.Limit = o } } @@ -108,3 +103,13 @@ func NewMappingLimitSettingsTotalFields() *MappingLimitSettingsTotalFields { return r } + +// true + +type MappingLimitSettingsTotalFieldsVariant interface { + MappingLimitSettingsTotalFieldsCaster() *MappingLimitSettingsTotalFields +} + +func (s *MappingLimitSettingsTotalFields) MappingLimitSettingsTotalFieldsCaster() *MappingLimitSettingsTotalFields { + return s +} diff --git a/typedapi/types/mappingstats.go b/typedapi/types/mappingstats.go index d3ec981178..ab5f09c2c9 100644 --- a/typedapi/types/mappingstats.go +++ b/typedapi/types/mappingstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MappingStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L186-L190 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L186-L190 type MappingStats struct { TotalCount int64 `json:"total_count"` TotalEstimatedOverhead ByteSize `json:"total_estimated_overhead,omitempty"` @@ -99,3 +99,5 @@ func NewMappingStats() *MappingStats { return r } + +// false diff --git a/typedapi/types/masterisstableindicator.go b/typedapi/types/masterisstableindicator.go index eddbfa8f4c..56d9185344 100644 --- a/typedapi/types/masterisstableindicator.go +++ b/typedapi/types/masterisstableindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // MasterIsStableIndicator type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L79-L83 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L80-L84 type MasterIsStableIndicator struct { Details *MasterIsStableIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewMasterIsStableIndicator() *MasterIsStableIndicator { return r } + +// false diff --git a/typedapi/types/masterisstableindicatorclusterformationnode.go b/typedapi/types/masterisstableindicatorclusterformationnode.go index 302eaa7074..d5bb34a160 100644 --- a/typedapi/types/masterisstableindicatorclusterformationnode.go +++ b/typedapi/types/masterisstableindicatorclusterformationnode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MasterIsStableIndicatorClusterFormationNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L98-L102 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L99-L103 type MasterIsStableIndicatorClusterFormationNode struct { ClusterFormationMessage string `json:"cluster_formation_message"` Name *string `json:"name,omitempty"` @@ -100,3 +100,5 @@ func NewMasterIsStableIndicatorClusterFormationNode() *MasterIsStableIndicatorCl return r } + +// false diff --git a/typedapi/types/masterisstableindicatordetails.go b/typedapi/types/masterisstableindicatordetails.go index 6682c77b16..27d716c608 100644 --- a/typedapi/types/masterisstableindicatordetails.go +++ b/typedapi/types/masterisstableindicatordetails.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // MasterIsStableIndicatorDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L84-L89 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L85-L90 type MasterIsStableIndicatorDetails struct { ClusterFormation []MasterIsStableIndicatorClusterFormationNode `json:"cluster_formation,omitempty"` CurrentMaster IndicatorNode `json:"current_master"` @@ -36,3 +36,5 @@ func NewMasterIsStableIndicatorDetails() *MasterIsStableIndicatorDetails { return r } + +// false diff --git a/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go b/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go index 7529860836..14aa1140cf 100644 --- a/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go +++ b/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MasterIsStableIndicatorExceptionFetchingHistory type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L94-L97 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L95-L98 type MasterIsStableIndicatorExceptionFetchingHistory struct { Message string `json:"message"` StackTrace string `json:"stack_trace"` @@ -87,3 +87,5 @@ func NewMasterIsStableIndicatorExceptionFetchingHistory() *MasterIsStableIndicat return r } + +// false diff --git a/typedapi/types/masterrecord.go b/typedapi/types/masterrecord.go index 51398730df..2b48897825 100644 --- a/typedapi/types/masterrecord.go +++ b/typedapi/types/masterrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MasterRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/master/types.ts#L20-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/master/types.ts#L20-L39 type MasterRecord struct { // Host host name Host *string `json:"host,omitempty"` @@ -117,3 +117,5 @@ func NewMasterRecord() *MasterRecord { return r } + +// false diff --git a/typedapi/types/matchallquery.go b/typedapi/types/matchallquery.go index 07a0ae74a6..0ceb0f8b05 100644 --- a/typedapi/types/matchallquery.go +++ b/typedapi/types/matchallquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MatchAllQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/MatchAllQuery.ts#L22-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/MatchAllQuery.ts#L22-L25 type MatchAllQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -96,3 +96,13 @@ func NewMatchAllQuery() *MatchAllQuery { return r } + +// true + +type MatchAllQueryVariant interface { + MatchAllQueryCaster() *MatchAllQuery +} + +func (s *MatchAllQuery) MatchAllQueryCaster() *MatchAllQuery { + return s +} diff --git a/typedapi/types/matchboolprefixquery.go b/typedapi/types/matchboolprefixquery.go index cbe2f1a449..41b4f21081 100644 --- a/typedapi/types/matchboolprefixquery.go +++ b/typedapi/types/matchboolprefixquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // MatchBoolPrefixQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L349-L403 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L355-L412 type MatchBoolPrefixQuery struct { // Analyzer Analyzer used to convert the text in the query value into tokens. Analyzer *string `json:"analyzer,omitempty"` @@ -232,3 +232,13 @@ func NewMatchBoolPrefixQuery() *MatchBoolPrefixQuery { return r } + +// true + +type MatchBoolPrefixQueryVariant interface { + MatchBoolPrefixQueryCaster() *MatchBoolPrefixQuery +} + +func (s *MatchBoolPrefixQuery) MatchBoolPrefixQueryCaster() *MatchBoolPrefixQuery { + return s +} diff --git a/typedapi/types/matchedfield.go b/typedapi/types/matchedfield.go index eebbcdbfee..761e69494e 100644 --- a/typedapi/types/matchedfield.go +++ b/typedapi/types/matchedfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MatchedField type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/text_structure/test_grok_pattern/types.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/text_structure/test_grok_pattern/types.ts#L23-L27 type MatchedField struct { Length int `json:"length"` Match string `json:"match"` @@ -108,3 +108,5 @@ func NewMatchedField() *MatchedField { return r } + +// false diff --git a/typedapi/types/matchedtext.go b/typedapi/types/matchedtext.go index e48d71a3a9..bfb3a94373 100644 --- a/typedapi/types/matchedtext.go +++ b/typedapi/types/matchedtext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MatchedText type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/text_structure/test_grok_pattern/types.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/text_structure/test_grok_pattern/types.ts#L29-L32 type MatchedText struct { Fields map[string][]MatchedField `json:"fields,omitempty"` Matched bool `json:"matched"` @@ -82,8 +82,10 @@ func (s *MatchedText) UnmarshalJSON(data []byte) error { // NewMatchedText returns a MatchedText. 
func NewMatchedText() *MatchedText { r := &MatchedText{ - Fields: make(map[string][]MatchedField, 0), + Fields: make(map[string][]MatchedField), } return r } + +// false diff --git a/typedapi/types/matchnonequery.go b/typedapi/types/matchnonequery.go index 629ddaf1d0..edbba5d8c1 100644 --- a/typedapi/types/matchnonequery.go +++ b/typedapi/types/matchnonequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MatchNoneQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/MatchNoneQuery.ts#L22-L22 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/MatchNoneQuery.ts#L22-L25 type MatchNoneQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -96,3 +96,13 @@ func NewMatchNoneQuery() *MatchNoneQuery { return r } + +// true + +type MatchNoneQueryVariant interface { + MatchNoneQueryCaster() *MatchNoneQuery +} + +func (s *MatchNoneQuery) MatchNoneQueryCaster() *MatchNoneQuery { + return s +} diff --git a/typedapi/types/matchonlytextproperty.go b/typedapi/types/matchonlytextproperty.go index 8ee0f07963..b366c86577 100644 --- a/typedapi/types/matchonlytextproperty.go +++ b/typedapi/types/matchonlytextproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // MatchOnlyTextProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L230-L255 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L247-L272 type MatchOnlyTextProperty struct { // CopyTo Allows you to copy the values of multiple fields into a group // field, which can then be queried as a single field. @@ -95,301 +95,313 @@ func (s *MatchOnlyTextProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -431,9 +443,19 @@ func (s MatchOnlyTextProperty) MarshalJSON() ([]byte, error) { // NewMatchOnlyTextProperty returns a MatchOnlyTextProperty. 
func NewMatchOnlyTextProperty() *MatchOnlyTextProperty { r := &MatchOnlyTextProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), } return r } + +// true + +type MatchOnlyTextPropertyVariant interface { + MatchOnlyTextPropertyCaster() *MatchOnlyTextProperty +} + +func (s *MatchOnlyTextProperty) MatchOnlyTextPropertyCaster() *MatchOnlyTextProperty { + return s +} diff --git a/typedapi/types/matchphraseprefixquery.go b/typedapi/types/matchphraseprefixquery.go index 5c5397a41a..2ac2dbf1a6 100644 --- a/typedapi/types/matchphraseprefixquery.go +++ b/typedapi/types/matchphraseprefixquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // MatchPhrasePrefixQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L428-L454 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L440-L469 type MatchPhrasePrefixQuery struct { // Analyzer Analyzer used to convert text in the query value into tokens. 
Analyzer *string `json:"analyzer,omitempty"` @@ -183,3 +183,13 @@ func NewMatchPhrasePrefixQuery() *MatchPhrasePrefixQuery { return r } + +// true + +type MatchPhrasePrefixQueryVariant interface { + MatchPhrasePrefixQueryCaster() *MatchPhrasePrefixQuery +} + +func (s *MatchPhrasePrefixQuery) MatchPhrasePrefixQueryCaster() *MatchPhrasePrefixQuery { + return s +} diff --git a/typedapi/types/matchphrasequery.go b/typedapi/types/matchphrasequery.go index a5ffec824d..70c4a2b6a7 100644 --- a/typedapi/types/matchphrasequery.go +++ b/typedapi/types/matchphrasequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // MatchPhraseQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L405-L426 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L414-L438 type MatchPhraseQuery struct { // Analyzer Analyzer used to convert the text in the query value into tokens. Analyzer *string `json:"analyzer,omitempty"` @@ -164,3 +164,13 @@ func NewMatchPhraseQuery() *MatchPhraseQuery { return r } + +// true + +type MatchPhraseQueryVariant interface { + MatchPhraseQueryCaster() *MatchPhraseQuery +} + +func (s *MatchPhraseQuery) MatchPhraseQueryCaster() *MatchPhraseQuery { + return s +} diff --git a/typedapi/types/matchquery.go b/typedapi/types/matchquery.go index 9ab45aefd7..04894a952f 100644 --- a/typedapi/types/matchquery.go +++ b/typedapi/types/matchquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // MatchQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L282-L347 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L285-L353 type MatchQuery struct { // Analyzer Analyzer used to convert the text in the query value into tokens. Analyzer *string `json:"analyzer,omitempty"` @@ -279,3 +279,13 @@ func NewMatchQuery() *MatchQuery { return r } + +// true + +type MatchQueryVariant interface { + MatchQueryCaster() *MatchQuery +} + +func (s *MatchQuery) MatchQueryCaster() *MatchQuery { + return s +} diff --git a/typedapi/types/matrixstatsaggregate.go b/typedapi/types/matrixstatsaggregate.go index 99d6a5c649..5bb13db20c 100644 --- a/typedapi/types/matrixstatsaggregate.go +++ b/typedapi/types/matrixstatsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MatrixStatsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L764-L768 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L866-L873 type MatrixStatsAggregate struct { DocCount int64 `json:"doc_count"` Fields []MatrixStatsFields `json:"fields,omitempty"` @@ -89,3 +89,5 @@ func NewMatrixStatsAggregate() *MatrixStatsAggregate { return r } + +// false diff --git a/typedapi/types/matrixstatsaggregation.go b/typedapi/types/matrixstatsaggregation.go index 77a17ce24f..35de864d93 100644 --- a/typedapi/types/matrixstatsaggregation.go +++ b/typedapi/types/matrixstatsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // MatrixStatsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/matrix.ts#L38-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/matrix.ts#L38-L44 type MatrixStatsAggregation struct { // Fields An array of fields for computing the statistics. Fields []string `json:"fields,omitempty"` @@ -95,8 +95,18 @@ func (s *MatrixStatsAggregation) UnmarshalJSON(data []byte) error { // NewMatrixStatsAggregation returns a MatrixStatsAggregation. 
func NewMatrixStatsAggregation() *MatrixStatsAggregation { r := &MatrixStatsAggregation{ - Missing: make(map[string]Float64, 0), + Missing: make(map[string]Float64), } return r } + +// true + +type MatrixStatsAggregationVariant interface { + MatrixStatsAggregationCaster() *MatrixStatsAggregation +} + +func (s *MatrixStatsAggregation) MatrixStatsAggregationCaster() *MatrixStatsAggregation { + return s +} diff --git a/typedapi/types/matrixstatsfields.go b/typedapi/types/matrixstatsfields.go index 504a57a9f9..ba9c060e3c 100644 --- a/typedapi/types/matrixstatsfields.go +++ b/typedapi/types/matrixstatsfields.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MatrixStatsFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L770-L779 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L875-L884 type MatrixStatsFields struct { Correlation map[string]Float64 `json:"correlation"` Count int64 `json:"count"` @@ -166,9 +166,11 @@ func (s *MatrixStatsFields) UnmarshalJSON(data []byte) error { // NewMatrixStatsFields returns a MatrixStatsFields. 
func NewMatrixStatsFields() *MatrixStatsFields { r := &MatrixStatsFields{ - Correlation: make(map[string]Float64, 0), - Covariance: make(map[string]Float64, 0), + Correlation: make(map[string]Float64), + Covariance: make(map[string]Float64), } return r } + +// false diff --git a/typedapi/types/maxaggregate.go b/typedapi/types/maxaggregate.go index 610b612796..4ec1a3a783 100644 --- a/typedapi/types/maxaggregate.go +++ b/typedapi/types/maxaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MaxAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L200-L201 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L205-L209 type MaxAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewMaxAggregate() *MaxAggregate { return r } + +// false diff --git a/typedapi/types/maxaggregation.go b/typedapi/types/maxaggregation.go index f755aa8af7..9e0dd6dedb 100644 --- a/typedapi/types/maxaggregation.go +++ b/typedapi/types/maxaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MaxAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L162-L162 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L165-L165 type MaxAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -95,3 +95,13 @@ func NewMaxAggregation() *MaxAggregation { return r } + +// true + +type MaxAggregationVariant interface { + MaxAggregationCaster() *MaxAggregation +} + +func (s *MaxAggregation) MaxAggregationCaster() *MaxAggregation { + return s +} diff --git a/typedapi/types/maxbucketaggregation.go b/typedapi/types/maxbucketaggregation.go index 438edf3b53..349d297e75 100644 --- a/typedapi/types/maxbucketaggregation.go +++ b/typedapi/types/maxbucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // MaxBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L224-L224 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L244-L247 type MaxBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewMaxBucketAggregation() *MaxBucketAggregation { return r } + +// true + +type MaxBucketAggregationVariant interface { + MaxBucketAggregationCaster() *MaxBucketAggregation +} + +func (s *MaxBucketAggregation) MaxBucketAggregationCaster() *MaxBucketAggregation { + return s +} diff --git a/typedapi/types/bucketpathaggregation.go b/typedapi/types/maxmind.go similarity index 58% rename from typedapi/types/bucketpathaggregation.go rename to typedapi/types/maxmind.go index 0da995b6db..2e0911e5c1 100644 --- a/typedapi/types/bucketpathaggregation.go +++ b/typedapi/types/maxmind.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,15 +28,14 @@ import ( "io" ) -// BucketPathAggregation type. +// Maxmind type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L31-L37 -type BucketPathAggregation struct { - // BucketsPath Path to the buckets that contain one set of values to correlate. 
- BucketsPath BucketsPath `json:"buckets_path,omitempty"` +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Database.ts#L55-L57 +type Maxmind struct { + AccountId string `json:"account_id"` } -func (s *BucketPathAggregation) UnmarshalJSON(data []byte) error { +func (s *Maxmind) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -51,9 +50,9 @@ func (s *BucketPathAggregation) UnmarshalJSON(data []byte) error { switch t { - case "buckets_path": - if err := dec.Decode(&s.BucketsPath); err != nil { - return fmt.Errorf("%s | %w", "BucketsPath", err) + case "account_id": + if err := dec.Decode(&s.AccountId); err != nil { + return fmt.Errorf("%s | %w", "AccountId", err) } } @@ -61,9 +60,19 @@ func (s *BucketPathAggregation) UnmarshalJSON(data []byte) error { return nil } -// NewBucketPathAggregation returns a BucketPathAggregation. -func NewBucketPathAggregation() *BucketPathAggregation { - r := &BucketPathAggregation{} +// NewMaxmind returns a Maxmind. +func NewMaxmind() *Maxmind { + r := &Maxmind{} return r } + +// true + +type MaxmindVariant interface { + MaxmindCaster() *Maxmind +} + +func (s *Maxmind) MaxmindCaster() *Maxmind { + return s +} diff --git a/typedapi/types/medianabsolutedeviationaggregate.go b/typedapi/types/medianabsolutedeviationaggregate.go index 28f4e632a1..e0031459bb 100644 --- a/typedapi/types/medianabsolutedeviationaggregate.go +++ b/typedapi/types/medianabsolutedeviationaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MedianAbsoluteDeviationAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L194-L195 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L196-L197 type MedianAbsoluteDeviationAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewMedianAbsoluteDeviationAggregate() *MedianAbsoluteDeviationAggregate { return r } + +// false diff --git a/typedapi/types/medianabsolutedeviationaggregation.go b/typedapi/types/medianabsolutedeviationaggregation.go index 0e5e12225b..cda9d46bdc 100644 --- a/typedapi/types/medianabsolutedeviationaggregation.go +++ b/typedapi/types/medianabsolutedeviationaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MedianAbsoluteDeviationAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L164-L170 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L167-L176 type MedianAbsoluteDeviationAggregation struct { // Compression Limits the maximum number of nodes used by the underlying TDigest algorithm // to `20 * compression`, enabling control of memory usage and approximation @@ -115,3 +115,13 @@ func NewMedianAbsoluteDeviationAggregation() *MedianAbsoluteDeviationAggregation return r } + +// true + +type MedianAbsoluteDeviationAggregationVariant interface { + MedianAbsoluteDeviationAggregationCaster() *MedianAbsoluteDeviationAggregation +} + +func (s *MedianAbsoluteDeviationAggregation) MedianAbsoluteDeviationAggregationCaster() *MedianAbsoluteDeviationAggregation { + return s +} diff --git a/typedapi/types/memmlstats.go b/typedapi/types/memmlstats.go index faf6a0a8c0..4118a3c32e 100644 --- a/typedapi/types/memmlstats.go +++ b/typedapi/types/memmlstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MemMlStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_memory_stats/types.ts#L90-L111 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_memory_stats/types.ts#L90-L111 type MemMlStats struct { // AnomalyDetectors Amount of native memory set aside for anomaly detection jobs. 
AnomalyDetectors ByteSize `json:"anomaly_detectors,omitempty"` @@ -192,3 +192,5 @@ func NewMemMlStats() *MemMlStats { return r } + +// false diff --git a/typedapi/types/memory.go b/typedapi/types/memory.go index d5fdc30476..d8cbb0348f 100644 --- a/typedapi/types/memory.go +++ b/typedapi/types/memory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // Memory type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_memory_stats/types.ts#L25-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_memory_stats/types.ts#L25-L48 type Memory struct { Attributes map[string]string `json:"attributes"` EphemeralId string `json:"ephemeral_id"` @@ -108,8 +108,10 @@ func (s *Memory) UnmarshalJSON(data []byte) error { // NewMemory returns a Memory. func NewMemory() *Memory { r := &Memory{ - Attributes: make(map[string]string, 0), + Attributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/memorystats.go b/typedapi/types/memorystats.go index 63df4df620..e77eb7e301 100644 --- a/typedapi/types/memorystats.go +++ b/typedapi/types/memorystats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MemoryStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L596-L620 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L632-L656 type MemoryStats struct { // AdjustedTotalInBytes If the amount of physical memory has been overridden using the // `es`.`total_memory_bytes` system property then this reports the overridden @@ -219,3 +219,5 @@ func NewMemoryStats() *MemoryStats { return r } + +// false diff --git a/typedapi/types/memstats.go b/typedapi/types/memstats.go index ea20693adf..ea0dd6b916 100644 --- a/typedapi/types/memstats.go +++ b/typedapi/types/memstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MemStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/get_memory_stats/types.ts#L65-L88 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/get_memory_stats/types.ts#L65-L88 type MemStats struct { // AdjustedTotal If the amount of physical memory has been overridden using the // es.total_memory_bytes system property @@ -124,3 +124,5 @@ func NewMemStats() *MemStats { return r } + +// false diff --git a/typedapi/types/merge.go b/typedapi/types/merge.go index 12b9d3243e..5810739426 100644 --- a/typedapi/types/merge.go +++ b/typedapi/types/merge.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Merge type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L332-L334 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L346-L348 type Merge struct { Scheduler *MergeScheduler `json:"scheduler,omitempty"` } @@ -33,3 +33,13 @@ func NewMerge() *Merge { return r } + +// true + +type MergeVariant interface { + MergeCaster() *Merge +} + +func (s *Merge) MergeCaster() *Merge { + return s +} diff --git a/typedapi/types/mergescheduler.go b/typedapi/types/mergescheduler.go index 459e0418cf..2ef0bcb26a 100644 --- a/typedapi/types/mergescheduler.go +++ b/typedapi/types/mergescheduler.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // MergeScheduler type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L336-L339 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L350-L353 type MergeScheduler struct { MaxMergeCount Stringifiedinteger `json:"max_merge_count,omitempty"` MaxThreadCount Stringifiedinteger `json:"max_thread_count,omitempty"` @@ -72,3 +72,13 @@ func NewMergeScheduler() *MergeScheduler { return r } + +// true + +type MergeSchedulerVariant interface { + MergeSchedulerCaster() *MergeScheduler +} + +func (s *MergeScheduler) MergeSchedulerCaster() *MergeScheduler { + return s +} diff --git a/typedapi/types/mergesstats.go b/typedapi/types/mergesstats.go index 5bab244cb9..77fe1a66a9 100644 --- a/typedapi/types/mergesstats.go +++ b/typedapi/types/mergesstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MergesStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L161-L178 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L164-L181 type MergesStats struct { Current int64 `json:"current"` CurrentDocs int64 `json:"current_docs"` @@ -248,3 +248,5 @@ func NewMergesStats() *MergesStats { return r } + +// false diff --git a/typedapi/types/message.go b/typedapi/types/message.go new file mode 100644 index 0000000000..9cc44d32df --- /dev/null +++ b/typedapi/types/message.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Message type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/chat_completion_unified/UnifiedRequest.ts#L145-L165 +type Message struct { + // Content The content of the message. + Content MessageContent `json:"content,omitempty"` + // Role The role of the message author. + Role string `json:"role"` + // ToolCallId The tool call that this message is responding to. + ToolCallId *string `json:"tool_call_id,omitempty"` + // ToolCalls The tool calls generated by the model. 
+ ToolCalls []ToolCall `json:"tool_calls,omitempty"` +} + +func (s *Message) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "content": + if err := dec.Decode(&s.Content); err != nil { + return fmt.Errorf("%s | %w", "Content", err) + } + + case "role": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Role", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Role = o + + case "tool_call_id": + if err := dec.Decode(&s.ToolCallId); err != nil { + return fmt.Errorf("%s | %w", "ToolCallId", err) + } + + case "tool_calls": + if err := dec.Decode(&s.ToolCalls); err != nil { + return fmt.Errorf("%s | %w", "ToolCalls", err) + } + + } + } + return nil +} + +// NewMessage returns a Message. +func NewMessage() *Message { + r := &Message{} + + return r +} + +// true + +type MessageVariant interface { + MessageCaster() *Message +} + +func (s *Message) MessageCaster() *Message { + return s +} diff --git a/typedapi/types/rankfeaturefunction.go b/typedapi/types/messagecontent.go similarity index 64% rename from typedapi/types/rankfeaturefunction.go rename to typedapi/types/messagecontent.go index c70a17411e..22792845d4 100644 --- a/typedapi/types/rankfeaturefunction.go +++ b/typedapi/types/messagecontent.go @@ -16,19 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types -// RankFeatureFunction type. 
+// MessageContent holds the union for the following types: // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L273-L273 -type RankFeatureFunction struct { -} - -// NewRankFeatureFunction returns a RankFeatureFunction. -func NewRankFeatureFunction() *RankFeatureFunction { - r := &RankFeatureFunction{} +// string +// []ContentObject +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/chat_completion_unified/UnifiedRequest.ts#L140-L143 +type MessageContent any - return r +type MessageContentVariant interface { + MessageContentCaster() *MessageContent } diff --git a/typedapi/types/metadata.go b/typedapi/types/metadata.go index e93c6a855b..1e6c205ba7 100644 --- a/typedapi/types/metadata.go +++ b/typedapi/types/metadata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,9 @@ import ( // Metadata type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L99-L99 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L100-L100 type Metadata map[string]json.RawMessage + +type MetadataVariant interface { + MetadataCaster() *Metadata +} diff --git a/typedapi/types/metrics.go b/typedapi/types/metrics.go index 6d0f7f160e..d63d9e633c 100644 --- a/typedapi/types/metrics.go +++ b/typedapi/types/metrics.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Metrics type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L76-L76 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L76-L76 type Metrics []string diff --git a/typedapi/types/mgetoperation.go b/typedapi/types/mgetoperation.go index 733f0c1c22..21398e3507 100644 --- a/typedapi/types/mgetoperation.go +++ b/typedapi/types/mgetoperation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // MgetOperation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/mget/types.ts#L32-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/mget/types.ts#L32-L55 type MgetOperation struct { // Id_ The unique document ID. 
Id_ string `json:"_id"` @@ -152,3 +152,13 @@ func NewMgetOperation() *MgetOperation { return r } + +// true + +type MgetOperationVariant interface { + MgetOperationCaster() *MgetOperation +} + +func (s *MgetOperation) MgetOperationCaster() *MgetOperation { + return s +} diff --git a/typedapi/types/mgetresponseitem.go b/typedapi/types/mgetresponseitem.go index 0cbbac20a2..2c0929b2b7 100644 --- a/typedapi/types/mgetresponseitem.go +++ b/typedapi/types/mgetresponseitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // GetResult // MultiGetError // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/mget/types.ts#L57-L60 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/mget/types.ts#L57-L60 type MgetResponseItem any diff --git a/typedapi/types/migrateaction.go b/typedapi/types/migrateaction.go index aba565ee90..0fe69e3256 100644 --- a/typedapi/types/migrateaction.go +++ b/typedapi/types/migrateaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MigrateAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Phase.ts#L144-L146 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Phase.ts#L141-L143 type MigrateAction struct { Enabled *bool `json:"enabled,omitempty"` } @@ -76,3 +76,13 @@ func NewMigrateAction() *MigrateAction { return r } + +// true + +type MigrateActionVariant interface { + MigrateActionCaster() *MigrateAction +} + +func (s *MigrateAction) MigrateActionCaster() *MigrateAction { + return s +} diff --git a/typedapi/types/migratereindex.go b/typedapi/types/migratereindex.go new file mode 100644 index 0000000000..21a93d8d9d --- /dev/null +++ b/typedapi/types/migratereindex.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/modeenum" +) + +// MigrateReindex type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/migrate_reindex/MigrateReindexRequest.ts#L39-L48 +type MigrateReindex struct { + // Mode Reindex mode. Currently only 'upgrade' is supported. + Mode modeenum.ModeEnum `json:"mode"` + // Source The source index or data stream (only data streams are currently supported). + Source SourceIndex `json:"source"` +} + +// NewMigrateReindex returns a MigrateReindex. +func NewMigrateReindex() *MigrateReindex { + r := &MigrateReindex{} + + return r +} + +// true + +type MigrateReindexVariant interface { + MigrateReindexCaster() *MigrateReindex +} + +func (s *MigrateReindex) MigrateReindexCaster() *MigrateReindex { + return s +} diff --git a/typedapi/types/migrationfeatureindexinfo.go b/typedapi/types/migrationfeatureindexinfo.go index ef1e3a1bfd..5c23c07a64 100644 --- a/typedapi/types/migrationfeatureindexinfo.go +++ b/typedapi/types/migrationfeatureindexinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // MigrationFeatureIndexInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L44-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L44-L48 type MigrationFeatureIndexInfo struct { FailureCause *ErrorCause `json:"failure_cause,omitempty"` Index string `json:"index"` @@ -78,3 +78,5 @@ func NewMigrationFeatureIndexInfo() *MigrationFeatureIndexInfo { return r } + +// false diff --git a/typedapi/types/minaggregate.go b/typedapi/types/minaggregate.go index 84264fbc70..12c0d5bfb0 100644 --- a/typedapi/types/minaggregate.go +++ b/typedapi/types/minaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MinAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L197-L198 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L199-L203 type MinAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewMinAggregate() *MinAggregate { return r } + +// false diff --git a/typedapi/types/minaggregation.go b/typedapi/types/minaggregation.go index d7e1e46526..f23924b951 100644 --- a/typedapi/types/minaggregation.go +++ b/typedapi/types/minaggregation.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MinAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L172-L172 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L178-L178 type MinAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -95,3 +95,13 @@ func NewMinAggregation() *MinAggregation { return r } + +// true + +type MinAggregationVariant interface { + MinAggregationCaster() *MinAggregation +} + +func (s *MinAggregation) MinAggregationCaster() *MinAggregation { + return s +} diff --git a/typedapi/types/minbucketaggregation.go b/typedapi/types/minbucketaggregation.go index 724de23c67..f5b36661b2 100644 --- a/typedapi/types/minbucketaggregation.go +++ b/typedapi/types/minbucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // MinBucketAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L226-L226 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L249-L252 type MinBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewMinBucketAggregation() *MinBucketAggregation { return r } + +// true + +type MinBucketAggregationVariant interface { + MinBucketAggregationCaster() *MinBucketAggregation +} + +func (s *MinBucketAggregation) MinBucketAggregationCaster() *MinBucketAggregation { + return s +} diff --git a/typedapi/types/minimallicenseinformation.go b/typedapi/types/minimallicenseinformation.go index 381aa1add3..455c6ad889 100644 --- a/typedapi/types/minimallicenseinformation.go +++ b/typedapi/types/minimallicenseinformation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // MinimalLicenseInformation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/info/types.ts#L34-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/info/types.ts#L34-L40 type MinimalLicenseInformation struct { ExpiryDateInMillis int64 `json:"expiry_date_in_millis"` Mode licensetype.LicenseType `json:"mode"` @@ -101,3 +101,5 @@ func NewMinimalLicenseInformation() *MinimalLicenseInformation { return r } + +// false diff --git a/typedapi/types/minimumshouldmatch.go b/typedapi/types/minimumshouldmatch.go index 233de9b027..9ad178f132 100644 --- a/typedapi/types/minimumshouldmatch.go +++ b/typedapi/types/minimumshouldmatch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // int // string // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L163-L167 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L167-L171 type MinimumShouldMatch any + +type MinimumShouldMatchVariant interface { + MinimumShouldMatchCaster() *MinimumShouldMatch +} diff --git a/typedapi/types/missing.go b/typedapi/types/missing.go index 99a60df564..eaad94a7b6 100644 --- a/typedapi/types/missing.go +++ b/typedapi/types/missing.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -27,5 +27,9 @@ package types // Float64 // bool // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/AggregationContainer.ts#L516-L516 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/AggregationContainer.ts#L535-L535 type Missing any + +type MissingVariant interface { + MissingCaster() *Missing +} diff --git a/typedapi/types/missingaggregate.go b/typedapi/types/missingaggregate.go index 15726be935..d0825e465c 100644 --- a/typedapi/types/missingaggregate.go +++ b/typedapi/types/missingaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // MissingAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L487-L488 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L528-L532 type MissingAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -491,6 +491,13 @@ func (s *MissingAggregate) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -624,8 +631,10 @@ func (s MissingAggregate) MarshalJSON() ([]byte, error) { // NewMissingAggregate returns a MissingAggregate. func NewMissingAggregate() *MissingAggregate { r := &MissingAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/missingaggregation.go b/typedapi/types/missingaggregation.go index b72eab56cd..6706310abf 100644 --- a/typedapi/types/missingaggregation.go +++ b/typedapi/types/missingaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // MissingAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L576-L582 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L593-L599 type MissingAggregation struct { // Field The name of the field. Field *string `json:"field,omitempty"` @@ -73,3 +73,13 @@ func NewMissingAggregation() *MissingAggregation { return r } + +// true + +type MissingAggregationVariant interface { + MissingAggregationCaster() *MissingAggregation +} + +func (s *MissingAggregation) MissingAggregationCaster() *MissingAggregation { + return s +} diff --git a/typedapi/types/mlcounter.go b/typedapi/types/mlcounter.go index 7cc095e8c7..2246eaa35e 100644 --- a/typedapi/types/mlcounter.go +++ b/typedapi/types/mlcounter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MlCounter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L255-L257 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L265-L267 type MlCounter struct { Count int64 `json:"count"` } @@ -77,3 +77,5 @@ func NewMlCounter() *MlCounter { return r } + +// false diff --git a/typedapi/types/mldatafeed.go b/typedapi/types/mldatafeed.go index 64a6fcdbc6..ffacd16b06 100644 --- a/typedapi/types/mldatafeed.go +++ b/typedapi/types/mldatafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MLDatafeed type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Datafeed.ts#L36-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Datafeed.ts#L37-L61 type MLDatafeed struct { Aggregations map[string]Aggregations `json:"aggregations,omitempty"` // Authorization The security privileges that the datafeed uses to run its queries. If Elastic @@ -185,9 +185,11 @@ func (s *MLDatafeed) UnmarshalJSON(data []byte) error { // NewMLDatafeed returns a MLDatafeed. func NewMLDatafeed() *MLDatafeed { r := &MLDatafeed{ - Aggregations: make(map[string]Aggregations, 0), - ScriptFields: make(map[string]ScriptField, 0), + Aggregations: make(map[string]Aggregations), + ScriptFields: make(map[string]ScriptField), } return r } + +// false diff --git a/typedapi/types/mldataframeanalyticsjobs.go b/typedapi/types/mldataframeanalyticsjobs.go index 0f2e953bf6..c975c791e3 100644 --- a/typedapi/types/mldataframeanalyticsjobs.go +++ b/typedapi/types/mldataframeanalyticsjobs.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // MlDataFrameAnalyticsJobs type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L177-L182 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L187-L192 type MlDataFrameAnalyticsJobs struct { All_ MlDataFrameAnalyticsJobsCount `json:"_all"` AnalysisCounts *MlDataFrameAnalyticsJobsAnalysis `json:"analysis_counts,omitempty"` @@ -36,3 +36,5 @@ func NewMlDataFrameAnalyticsJobs() *MlDataFrameAnalyticsJobs { return r } + +// false diff --git a/typedapi/types/mldataframeanalyticsjobsanalysis.go b/typedapi/types/mldataframeanalyticsjobsanalysis.go index 9dbc2ff9f0..8b43582725 100644 --- a/typedapi/types/mldataframeanalyticsjobsanalysis.go +++ b/typedapi/types/mldataframeanalyticsjobsanalysis.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MlDataFrameAnalyticsJobsAnalysis type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L184-L188 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L194-L198 type MlDataFrameAnalyticsJobsAnalysis struct { Classification *int `json:"classification,omitempty"` OutlierDetection *int `json:"outlier_detection,omitempty"` @@ -112,3 +112,5 @@ func NewMlDataFrameAnalyticsJobsAnalysis() *MlDataFrameAnalyticsJobsAnalysis { return r } + +// false diff --git a/typedapi/types/mldataframeanalyticsjobscount.go b/typedapi/types/mldataframeanalyticsjobscount.go index 30d295450c..3ef905086c 100644 --- a/typedapi/types/mldataframeanalyticsjobscount.go +++ b/typedapi/types/mldataframeanalyticsjobscount.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MlDataFrameAnalyticsJobsCount type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L194-L196 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L204-L206 type MlDataFrameAnalyticsJobsCount struct { Count int64 `json:"count"` } @@ -77,3 +77,5 @@ func NewMlDataFrameAnalyticsJobsCount() *MlDataFrameAnalyticsJobsCount { return r } + +// false diff --git a/typedapi/types/mldataframeanalyticsjobsmemory.go b/typedapi/types/mldataframeanalyticsjobsmemory.go index 77884a35a6..6d8938391e 100644 --- a/typedapi/types/mldataframeanalyticsjobsmemory.go +++ b/typedapi/types/mldataframeanalyticsjobsmemory.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // MlDataFrameAnalyticsJobsMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L190-L192 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L200-L202 type MlDataFrameAnalyticsJobsMemory struct { PeakUsageBytes JobStatistics `json:"peak_usage_bytes"` } @@ -33,3 +33,5 @@ func NewMlDataFrameAnalyticsJobsMemory() *MlDataFrameAnalyticsJobsMemory { return r } + +// false diff --git a/typedapi/types/mlfilter.go b/typedapi/types/mlfilter.go index 23630b66fc..f4827d5ca2 100644 --- a/typedapi/types/mlfilter.go +++ b/typedapi/types/mlfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MLFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Filter.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Filter.ts#L22-L29 type MLFilter struct { // Description A description of the filter. Description *string `json:"description,omitempty"` @@ -89,3 +89,5 @@ func NewMLFilter() *MLFilter { return r } + +// false diff --git a/typedapi/types/mlinference.go b/typedapi/types/mlinference.go index 976d4b01d0..16e8272ad8 100644 --- a/typedapi/types/mlinference.go +++ b/typedapi/types/mlinference.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // MlInference type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L198-L206 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L208-L216 type MlInference struct { Deployments *MlInferenceDeployments `json:"deployments,omitempty"` IngestProcessors map[string]MlInferenceIngestProcessor `json:"ingest_processors"` @@ -32,8 +32,10 @@ type MlInference struct { // NewMlInference returns a MlInference. 
func NewMlInference() *MlInference { r := &MlInference{ - IngestProcessors: make(map[string]MlInferenceIngestProcessor, 0), + IngestProcessors: make(map[string]MlInferenceIngestProcessor), } return r } + +// false diff --git a/typedapi/types/mlinferencedeployments.go b/typedapi/types/mlinferencedeployments.go index fb10d3b47b..e0bc5a43d9 100644 --- a/typedapi/types/mlinferencedeployments.go +++ b/typedapi/types/mlinferencedeployments.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MlInferenceDeployments type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L227-L232 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L237-L242 type MlInferenceDeployments struct { Count int `json:"count"` InferenceCounts JobStatistics `json:"inference_counts"` @@ -96,3 +96,5 @@ func NewMlInferenceDeployments() *MlInferenceDeployments { return r } + +// false diff --git a/typedapi/types/mlinferencedeploymentstimems.go b/typedapi/types/mlinferencedeploymentstimems.go index 75bbe0cd4b..c266c4722f 100644 --- a/typedapi/types/mlinferencedeploymentstimems.go +++ b/typedapi/types/mlinferencedeploymentstimems.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MlInferenceDeploymentsTimeMs type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L234-L236 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L244-L246 type MlInferenceDeploymentsTimeMs struct { Avg Float64 `json:"avg"` } @@ -78,3 +78,5 @@ func NewMlInferenceDeploymentsTimeMs() *MlInferenceDeploymentsTimeMs { return r } + +// false diff --git a/typedapi/types/mlinferenceingestprocessor.go b/typedapi/types/mlinferenceingestprocessor.go index c5fcc2412d..c4a1304dd9 100644 --- a/typedapi/types/mlinferenceingestprocessor.go +++ b/typedapi/types/mlinferenceingestprocessor.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // MlInferenceIngestProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L208-L213 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L218-L223 type MlInferenceIngestProcessor struct { NumDocsProcessed MlInferenceIngestProcessorCount `json:"num_docs_processed"` NumFailures MlInferenceIngestProcessorCount `json:"num_failures"` @@ -36,3 +36,5 @@ func NewMlInferenceIngestProcessor() *MlInferenceIngestProcessor { return r } + +// false diff --git a/typedapi/types/mlinferenceingestprocessorcount.go b/typedapi/types/mlinferenceingestprocessorcount.go index 85e896c226..7b95168dc3 100644 --- a/typedapi/types/mlinferenceingestprocessorcount.go +++ b/typedapi/types/mlinferenceingestprocessorcount.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MlInferenceIngestProcessorCount type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L238-L242 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L248-L252 type MlInferenceIngestProcessorCount struct { Max int64 `json:"max"` Min int64 `json:"min"` @@ -109,3 +109,5 @@ func NewMlInferenceIngestProcessorCount() *MlInferenceIngestProcessorCount { return r } + +// false diff --git a/typedapi/types/mlinferencetrainedmodels.go b/typedapi/types/mlinferencetrainedmodels.go index 160c3424f3..deb56ac5fe 100644 --- a/typedapi/types/mlinferencetrainedmodels.go +++ b/typedapi/types/mlinferencetrainedmodels.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // MlInferenceTrainedModels type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L215-L225 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L225-L235 type MlInferenceTrainedModels struct { All_ MlCounter `json:"_all"` Count *MlInferenceTrainedModelsCount `json:"count,omitempty"` @@ -37,3 +37,5 @@ func NewMlInferenceTrainedModels() *MlInferenceTrainedModels { return r } + +// false diff --git a/typedapi/types/mlinferencetrainedmodelscount.go b/typedapi/types/mlinferencetrainedmodelscount.go index 3a3e355d17..cb1477bd8f 100644 --- a/typedapi/types/mlinferencetrainedmodelscount.go +++ b/typedapi/types/mlinferencetrainedmodelscount.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MlInferenceTrainedModelsCount type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L244-L253 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L254-L263 type MlInferenceTrainedModelsCount struct { Classification *int64 `json:"classification,omitempty"` Ner *int64 `json:"ner,omitempty"` @@ -189,3 +189,5 @@ func NewMlInferenceTrainedModelsCount() *MlInferenceTrainedModelsCount { return r } + +// false diff --git a/typedapi/types/mljobforecasts.go b/typedapi/types/mljobforecasts.go index 6dd8595a1c..e45ca4699e 100644 --- a/typedapi/types/mljobforecasts.go +++ b/typedapi/types/mljobforecasts.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MlJobForecasts type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L172-L175 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L182-L185 type MlJobForecasts struct { ForecastedJobs int64 `json:"forecasted_jobs"` Total int64 `json:"total"` @@ -93,3 +93,5 @@ func NewMlJobForecasts() *MlJobForecasts { return r } + +// false diff --git a/typedapi/types/modelpackageconfig.go b/typedapi/types/modelpackageconfig.go new file mode 100644 index 0000000000..810c4d9c3a --- /dev/null +++ b/typedapi/types/modelpackageconfig.go @@ -0,0 +1,203 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ModelPackageConfig type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L256-L271 +type ModelPackageConfig struct { + CreateTime *int64 `json:"create_time,omitempty"` + Description *string `json:"description,omitempty"` + InferenceConfig map[string]json.RawMessage `json:"inference_config,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` + MinimumVersion *string `json:"minimum_version,omitempty"` + ModelRepository *string `json:"model_repository,omitempty"` + ModelType *string `json:"model_type,omitempty"` + PackagedModelId string `json:"packaged_model_id"` + PlatformArchitecture *string `json:"platform_architecture,omitempty"` + PrefixStrings *TrainedModelPrefixStrings `json:"prefix_strings,omitempty"` + Sha256 *string `json:"sha256,omitempty"` + Size ByteSize `json:"size,omitempty"` + Tags []string `json:"tags,omitempty"` + VocabularyFile *string `json:"vocabulary_file,omitempty"` +} + +func (s *ModelPackageConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "create_time": + if err := dec.Decode(&s.CreateTime); err != nil { + return fmt.Errorf("%s | %w", "CreateTime", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "inference_config": + if s.InferenceConfig == nil { + s.InferenceConfig = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.InferenceConfig); err != nil { + return fmt.Errorf("%s | %w", "InferenceConfig", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case 
"minimum_version": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MinimumVersion", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinimumVersion = &o + + case "model_repository": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelRepository", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelRepository = &o + + case "model_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelType = &o + + case "packaged_model_id": + if err := dec.Decode(&s.PackagedModelId); err != nil { + return fmt.Errorf("%s | %w", "PackagedModelId", err) + } + + case "platform_architecture": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PlatformArchitecture", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PlatformArchitecture = &o + + case "prefix_strings": + if err := dec.Decode(&s.PrefixStrings); err != nil { + return fmt.Errorf("%s | %w", "PrefixStrings", err) + } + + case "sha256": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Sha256", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Sha256 = &o + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + case "tags": + if err := dec.Decode(&s.Tags); err != nil { + return fmt.Errorf("%s | %w", "Tags", err) + } + + case "vocabulary_file": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "VocabularyFile", err) 
+ } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VocabularyFile = &o + + } + } + return nil +} + +// NewModelPackageConfig returns a ModelPackageConfig. +func NewModelPackageConfig() *ModelPackageConfig { + r := &ModelPackageConfig{ + InferenceConfig: make(map[string]json.RawMessage), + } + + return r +} + +// false diff --git a/typedapi/types/modelplotconfig.go b/typedapi/types/modelplotconfig.go index aa68175bb4..bb632a55c5 100644 --- a/typedapi/types/modelplotconfig.go +++ b/typedapi/types/modelplotconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ModelPlotConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/ModelPlot.ts#L23-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/ModelPlot.ts#L23-L42 type ModelPlotConfig struct { // AnnotationsEnabled If true, enables calculation and storage of the model change annotations for // each entity that is being analyzed. @@ -105,3 +105,13 @@ func NewModelPlotConfig() *ModelPlotConfig { return r } + +// true + +type ModelPlotConfigVariant interface { + ModelPlotConfigCaster() *ModelPlotConfig +} + +func (s *ModelPlotConfig) ModelPlotConfigCaster() *ModelPlotConfig { + return s +} diff --git a/typedapi/types/modelsizestats.go b/typedapi/types/modelsizestats.go index 7a0c82a278..e91ee77127 100644 --- a/typedapi/types/modelsizestats.go +++ b/typedapi/types/modelsizestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // ModelSizeStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Model.ts#L59-L81 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Model.ts#L59-L82 type ModelSizeStats struct { AssignmentMemoryBasis *string `json:"assignment_memory_basis,omitempty"` BucketAllocationFailuresCount int64 `json:"bucket_allocation_failures_count"` @@ -49,6 +49,7 @@ type ModelSizeStats struct { ModelBytes ByteSize `json:"model_bytes"` ModelBytesExceeded ByteSize `json:"model_bytes_exceeded,omitempty"` ModelBytesMemoryLimit ByteSize `json:"model_bytes_memory_limit,omitempty"` + OutputMemoryAllocatorBytes ByteSize `json:"output_memory_allocator_bytes,omitempty"` PeakModelBytes ByteSize `json:"peak_model_bytes,omitempty"` RareCategoryCount int `json:"rare_category_count"` ResultType string `json:"result_type"` @@ -200,6 +201,11 @@ func (s *ModelSizeStats) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "ModelBytesMemoryLimit", err) } + case "output_memory_allocator_bytes": + if err := dec.Decode(&s.OutputMemoryAllocatorBytes); err != nil { + return fmt.Errorf("%s | %w", "OutputMemoryAllocatorBytes", err) + } + case "peak_model_bytes": if err := dec.Decode(&s.PeakModelBytes); err != nil { return fmt.Errorf("%s | %w", "PeakModelBytes", err) @@ -320,3 +326,5 @@ func NewModelSizeStats() *ModelSizeStats { return r } + +// false diff --git a/typedapi/types/modelsnapshot.go b/typedapi/types/modelsnapshot.go index a06011d581..9239cd2ce5 100644 --- a/typedapi/types/modelsnapshot.go +++ b/typedapi/types/modelsnapshot.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ModelSnapshot type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Model.ts#L25-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Model.ts#L25-L46 type ModelSnapshot struct { // Description An optional description of the job. Description *string `json:"description,omitempty"` @@ -192,3 +192,5 @@ func NewModelSnapshot() *ModelSnapshot { return r } + +// false diff --git a/typedapi/types/modelsnapshotupgrade.go b/typedapi/types/modelsnapshotupgrade.go index e983e879f6..8d1dfdbd90 100644 --- a/typedapi/types/modelsnapshotupgrade.go +++ b/typedapi/types/modelsnapshotupgrade.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // ModelSnapshotUpgrade type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Model.ts#L48-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Model.ts#L48-L57 type ModelSnapshotUpgrade struct { AssignmentExplanation string `json:"assignment_explanation"` JobId string `json:"job_id"` @@ -100,3 +100,5 @@ func NewModelSnapshotUpgrade() *ModelSnapshotUpgrade { return r } + +// false diff --git a/typedapi/types/monitoring.go b/typedapi/types/monitoring.go index 9bfae00c11..49f23a1400 100644 --- a/typedapi/types/monitoring.go +++ b/typedapi/types/monitoring.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Monitoring type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L381-L384 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L391-L394 type Monitoring struct { Available bool `json:"available"` CollectionEnabled bool `json:"collection_enabled"` @@ -112,8 +112,10 @@ func (s *Monitoring) UnmarshalJSON(data []byte) error { // NewMonitoring returns a Monitoring. 
func NewMonitoring() *Monitoring { r := &Monitoring{ - EnabledExporters: make(map[string]int64, 0), + EnabledExporters: make(map[string]int64), } return r } + +// false diff --git a/typedapi/types/morelikethisquery.go b/typedapi/types/morelikethisquery.go index aa705a35b7..05fba8a97d 100644 --- a/typedapi/types/morelikethisquery.go +++ b/typedapi/types/morelikethisquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // MoreLikeThisQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L87-L169 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L87-L172 type MoreLikeThisQuery struct { // Analyzer The analyzer that is used to analyze the free form text. // Defaults to the analyzer associated with the first field in fields. @@ -371,3 +371,13 @@ func NewMoreLikeThisQuery() *MoreLikeThisQuery { return r } + +// true + +type MoreLikeThisQueryVariant interface { + MoreLikeThisQueryCaster() *MoreLikeThisQuery +} + +func (s *MoreLikeThisQuery) MoreLikeThisQueryCaster() *MoreLikeThisQuery { + return s +} diff --git a/typedapi/types/mountedsnapshot.go b/typedapi/types/mountedsnapshot.go index a4ae3b14d6..73ee51f862 100644 --- a/typedapi/types/mountedsnapshot.go +++ b/typedapi/types/mountedsnapshot.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // MountedSnapshot type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/searchable_snapshots/mount/types.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/searchable_snapshots/mount/types.ts#L23-L27 type MountedSnapshot struct { Indices []string `json:"indices"` Shards ShardStatistics `json:"shards"` @@ -89,3 +89,5 @@ func NewMountedSnapshot() *MountedSnapshot { return r } + +// false diff --git a/typedapi/types/movingaverageaggregation.go b/typedapi/types/movingaverageaggregation.go index 37aa167105..83be68c5bd 100644 --- a/typedapi/types/movingaverageaggregation.go +++ b/typedapi/types/movingaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,5 +28,9 @@ package types // HoltMovingAverageAggregation // HoltWintersMovingAverageAggregation // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L228-L234 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L254-L260 type MovingAverageAggregation any + +type MovingAverageAggregationVariant interface { + MovingAverageAggregationCaster() *MovingAverageAggregation +} diff --git a/typedapi/types/movingfunctionaggregation.go b/typedapi/types/movingfunctionaggregation.go index 61f8fad66d..a4d47819e1 100644 --- a/typedapi/types/movingfunctionaggregation.go +++ b/typedapi/types/movingfunctionaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // MovingFunctionAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L288-L303 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L314-L332 type MovingFunctionAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -146,3 +146,13 @@ func NewMovingFunctionAggregation() *MovingFunctionAggregation { return r } + +// true + +type MovingFunctionAggregationVariant interface { + MovingFunctionAggregationCaster() *MovingFunctionAggregation +} + +func (s *MovingFunctionAggregation) MovingFunctionAggregationCaster() *MovingFunctionAggregation { + return s +} diff --git a/typedapi/types/movingpercentilesaggregation.go b/typedapi/types/movingpercentilesaggregation.go index e9d3e7037f..8885dc74d7 100644 --- a/typedapi/types/movingpercentilesaggregation.go +++ b/typedapi/types/movingpercentilesaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // MovingPercentilesAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L305-L317 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L334-L349 type MovingPercentilesAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -147,3 +147,13 @@ func NewMovingPercentilesAggregation() *MovingPercentilesAggregation { return r } + +// true + +type MovingPercentilesAggregationVariant interface { + MovingPercentilesAggregationCaster() *MovingPercentilesAggregation +} + +func (s *MovingPercentilesAggregation) MovingPercentilesAggregationCaster() *MovingPercentilesAggregation { + return s +} diff --git a/typedapi/types/msearchrequestitem.go b/typedapi/types/msearchrequestitem.go index 121c92f758..5ee86eb48f 100644 --- a/typedapi/types/msearchrequestitem.go +++ b/typedapi/types/msearchrequestitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // MultisearchHeader // MultisearchBody // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/msearch/types.ts#L47-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/msearch/types.ts#L47-L50 type MsearchRequestItem any + +type MsearchRequestItemVariant interface { + MsearchRequestItemCaster() *MsearchRequestItem +} diff --git a/typedapi/types/msearchresponseitem.go b/typedapi/types/msearchresponseitem.go index a00b2b12f2..d65a8f84ad 100644 --- a/typedapi/types/msearchresponseitem.go +++ b/typedapi/types/msearchresponseitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // MultiSearchItem // ErrorResponseBase // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/msearch/types.ts#L208-L211 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/msearch/types.ts#L211-L214 type MsearchResponseItem any diff --git a/typedapi/types/mtermvectorsoperation.go b/typedapi/types/mtermvectorsoperation.go index dce2785430..2ada406e24 100644 --- a/typedapi/types/mtermvectorsoperation.go +++ b/typedapi/types/mtermvectorsoperation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // MTermVectorsOperation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/mtermvectors/types.ts#L35-L94 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/mtermvectors/types.ts#L35-L94 type MTermVectorsOperation struct { // Doc An artificial document (a document not present in the index) for which you // want to retrieve term vectors. 
@@ -215,3 +215,13 @@ func NewMTermVectorsOperation() *MTermVectorsOperation { return r } + +// true + +type MTermVectorsOperationVariant interface { + MTermVectorsOperationCaster() *MTermVectorsOperation +} + +func (s *MTermVectorsOperation) MTermVectorsOperationCaster() *MTermVectorsOperation { + return s +} diff --git a/typedapi/types/multibucketaggregatebaseadjacencymatrixbucket.go b/typedapi/types/multibucketaggregatebaseadjacencymatrixbucket.go deleted file mode 100644 index 35ee49bf5c..0000000000 --- a/typedapi/types/multibucketaggregatebaseadjacencymatrixbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseAdjacencyMatrixBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseAdjacencyMatrixBucket struct { - Buckets BucketsAdjacencyMatrixBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseAdjacencyMatrixBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]AdjacencyMatrixBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []AdjacencyMatrixBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseAdjacencyMatrixBucket returns a MultiBucketAggregateBaseAdjacencyMatrixBucket. -func NewMultiBucketAggregateBaseAdjacencyMatrixBucket() *MultiBucketAggregateBaseAdjacencyMatrixBucket { - r := &MultiBucketAggregateBaseAdjacencyMatrixBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasecompositebucket.go b/typedapi/types/multibucketaggregatebasecompositebucket.go deleted file mode 100644 index 2665127f53..0000000000 --- a/typedapi/types/multibucketaggregatebasecompositebucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseCompositeBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseCompositeBucket struct { - Buckets BucketsCompositeBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseCompositeBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]CompositeBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []CompositeBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseCompositeBucket returns a MultiBucketAggregateBaseCompositeBucket. -func NewMultiBucketAggregateBaseCompositeBucket() *MultiBucketAggregateBaseCompositeBucket { - r := &MultiBucketAggregateBaseCompositeBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasedatehistogrambucket.go b/typedapi/types/multibucketaggregatebasedatehistogrambucket.go deleted file mode 100644 index 274ea97360..0000000000 --- a/typedapi/types/multibucketaggregatebasedatehistogrambucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseDateHistogramBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseDateHistogramBucket struct { - Buckets BucketsDateHistogramBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseDateHistogramBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]DateHistogramBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []DateHistogramBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - 
case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseDateHistogramBucket returns a MultiBucketAggregateBaseDateHistogramBucket. -func NewMultiBucketAggregateBaseDateHistogramBucket() *MultiBucketAggregateBaseDateHistogramBucket { - r := &MultiBucketAggregateBaseDateHistogramBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasedoubletermsbucket.go b/typedapi/types/multibucketaggregatebasedoubletermsbucket.go deleted file mode 100644 index da9c297020..0000000000 --- a/typedapi/types/multibucketaggregatebasedoubletermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseDoubleTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseDoubleTermsBucket struct { - Buckets BucketsDoubleTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseDoubleTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]DoubleTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []DoubleTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseDoubleTermsBucket returns a MultiBucketAggregateBaseDoubleTermsBucket. -func NewMultiBucketAggregateBaseDoubleTermsBucket() *MultiBucketAggregateBaseDoubleTermsBucket { - r := &MultiBucketAggregateBaseDoubleTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasefiltersbucket.go b/typedapi/types/multibucketaggregatebasefiltersbucket.go deleted file mode 100644 index 5a9a504a1f..0000000000 --- a/typedapi/types/multibucketaggregatebasefiltersbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseFiltersBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseFiltersBucket struct { - Buckets BucketsFiltersBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseFiltersBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]FiltersBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []FiltersBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := 
dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseFiltersBucket returns a MultiBucketAggregateBaseFiltersBucket. -func NewMultiBucketAggregateBaseFiltersBucket() *MultiBucketAggregateBaseFiltersBucket { - r := &MultiBucketAggregateBaseFiltersBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasefrequentitemsetsbucket.go b/typedapi/types/multibucketaggregatebasefrequentitemsetsbucket.go deleted file mode 100644 index 74875d6abf..0000000000 --- a/typedapi/types/multibucketaggregatebasefrequentitemsetsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseFrequentItemSetsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseFrequentItemSetsBucket struct { - Buckets BucketsFrequentItemSetsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseFrequentItemSetsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]FrequentItemSetsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []FrequentItemSetsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseFrequentItemSetsBucket returns a MultiBucketAggregateBaseFrequentItemSetsBucket. -func NewMultiBucketAggregateBaseFrequentItemSetsBucket() *MultiBucketAggregateBaseFrequentItemSetsBucket { - r := &MultiBucketAggregateBaseFrequentItemSetsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasegeohashgridbucket.go b/typedapi/types/multibucketaggregatebasegeohashgridbucket.go deleted file mode 100644 index 7be4981b42..0000000000 --- a/typedapi/types/multibucketaggregatebasegeohashgridbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseGeoHashGridBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseGeoHashGridBucket struct { - Buckets BucketsGeoHashGridBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseGeoHashGridBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]GeoHashGridBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []GeoHashGridBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseGeoHashGridBucket returns a MultiBucketAggregateBaseGeoHashGridBucket. -func NewMultiBucketAggregateBaseGeoHashGridBucket() *MultiBucketAggregateBaseGeoHashGridBucket { - r := &MultiBucketAggregateBaseGeoHashGridBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasegeohexgridbucket.go b/typedapi/types/multibucketaggregatebasegeohexgridbucket.go deleted file mode 100644 index 7974475347..0000000000 --- a/typedapi/types/multibucketaggregatebasegeohexgridbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseGeoHexGridBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseGeoHexGridBucket struct { - Buckets BucketsGeoHexGridBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseGeoHexGridBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]GeoHexGridBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []GeoHexGridBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err 
:= dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseGeoHexGridBucket returns a MultiBucketAggregateBaseGeoHexGridBucket. -func NewMultiBucketAggregateBaseGeoHexGridBucket() *MultiBucketAggregateBaseGeoHexGridBucket { - r := &MultiBucketAggregateBaseGeoHexGridBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasegeotilegridbucket.go b/typedapi/types/multibucketaggregatebasegeotilegridbucket.go deleted file mode 100644 index ffca517eef..0000000000 --- a/typedapi/types/multibucketaggregatebasegeotilegridbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseGeoTileGridBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseGeoTileGridBucket struct { - Buckets BucketsGeoTileGridBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseGeoTileGridBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]GeoTileGridBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []GeoTileGridBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseGeoTileGridBucket returns a MultiBucketAggregateBaseGeoTileGridBucket. -func NewMultiBucketAggregateBaseGeoTileGridBucket() *MultiBucketAggregateBaseGeoTileGridBucket { - r := &MultiBucketAggregateBaseGeoTileGridBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasehistogrambucket.go b/typedapi/types/multibucketaggregatebasehistogrambucket.go deleted file mode 100644 index 9d14ff6380..0000000000 --- a/typedapi/types/multibucketaggregatebasehistogrambucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseHistogramBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseHistogramBucket struct { - Buckets BucketsHistogramBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseHistogramBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]HistogramBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []HistogramBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := 
dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseHistogramBucket returns a MultiBucketAggregateBaseHistogramBucket. -func NewMultiBucketAggregateBaseHistogramBucket() *MultiBucketAggregateBaseHistogramBucket { - r := &MultiBucketAggregateBaseHistogramBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebaseipprefixbucket.go b/typedapi/types/multibucketaggregatebaseipprefixbucket.go deleted file mode 100644 index a600e91692..0000000000 --- a/typedapi/types/multibucketaggregatebaseipprefixbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseIpPrefixBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseIpPrefixBucket struct { - Buckets BucketsIpPrefixBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseIpPrefixBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]IpPrefixBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []IpPrefixBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseIpPrefixBucket returns a MultiBucketAggregateBaseIpPrefixBucket. -func NewMultiBucketAggregateBaseIpPrefixBucket() *MultiBucketAggregateBaseIpPrefixBucket { - r := &MultiBucketAggregateBaseIpPrefixBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebaseiprangebucket.go b/typedapi/types/multibucketaggregatebaseiprangebucket.go deleted file mode 100644 index 4a4daa82d7..0000000000 --- a/typedapi/types/multibucketaggregatebaseiprangebucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseIpRangeBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseIpRangeBucket struct { - Buckets BucketsIpRangeBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseIpRangeBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]IpRangeBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []IpRangeBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := 
dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseIpRangeBucket returns a MultiBucketAggregateBaseIpRangeBucket. -func NewMultiBucketAggregateBaseIpRangeBucket() *MultiBucketAggregateBaseIpRangeBucket { - r := &MultiBucketAggregateBaseIpRangeBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebaselongraretermsbucket.go b/typedapi/types/multibucketaggregatebaselongraretermsbucket.go deleted file mode 100644 index e41f38dee6..0000000000 --- a/typedapi/types/multibucketaggregatebaselongraretermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseLongRareTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseLongRareTermsBucket struct { - Buckets BucketsLongRareTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseLongRareTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]LongRareTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []LongRareTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseLongRareTermsBucket returns a MultiBucketAggregateBaseLongRareTermsBucket. -func NewMultiBucketAggregateBaseLongRareTermsBucket() *MultiBucketAggregateBaseLongRareTermsBucket { - r := &MultiBucketAggregateBaseLongRareTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebaselongtermsbucket.go b/typedapi/types/multibucketaggregatebaselongtermsbucket.go deleted file mode 100644 index b09717e5ea..0000000000 --- a/typedapi/types/multibucketaggregatebaselongtermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. 
Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseLongTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseLongTermsBucket struct { - Buckets BucketsLongTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseLongTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]LongTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []LongTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case 
"meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseLongTermsBucket returns a MultiBucketAggregateBaseLongTermsBucket. -func NewMultiBucketAggregateBaseLongTermsBucket() *MultiBucketAggregateBaseLongTermsBucket { - r := &MultiBucketAggregateBaseLongTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasemultitermsbucket.go b/typedapi/types/multibucketaggregatebasemultitermsbucket.go deleted file mode 100644 index 3d60738db7..0000000000 --- a/typedapi/types/multibucketaggregatebasemultitermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseMultiTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseMultiTermsBucket struct { - Buckets BucketsMultiTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseMultiTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]MultiTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []MultiTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseMultiTermsBucket returns a MultiBucketAggregateBaseMultiTermsBucket. -func NewMultiBucketAggregateBaseMultiTermsBucket() *MultiBucketAggregateBaseMultiTermsBucket { - r := &MultiBucketAggregateBaseMultiTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebaserangebucket.go b/typedapi/types/multibucketaggregatebaserangebucket.go deleted file mode 100644 index deb3c800da..0000000000 --- a/typedapi/types/multibucketaggregatebaserangebucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseRangeBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseRangeBucket struct { - Buckets BucketsRangeBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseRangeBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]RangeBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []RangeBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err 
!= nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseRangeBucket returns a MultiBucketAggregateBaseRangeBucket. -func NewMultiBucketAggregateBaseRangeBucket() *MultiBucketAggregateBaseRangeBucket { - r := &MultiBucketAggregateBaseRangeBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasesignificantlongtermsbucket.go b/typedapi/types/multibucketaggregatebasesignificantlongtermsbucket.go deleted file mode 100644 index dabfbac2fa..0000000000 --- a/typedapi/types/multibucketaggregatebasesignificantlongtermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseSignificantLongTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseSignificantLongTermsBucket struct { - Buckets BucketsSignificantLongTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseSignificantLongTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]SignificantLongTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []SignificantLongTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseSignificantLongTermsBucket returns a MultiBucketAggregateBaseSignificantLongTermsBucket. -func NewMultiBucketAggregateBaseSignificantLongTermsBucket() *MultiBucketAggregateBaseSignificantLongTermsBucket { - r := &MultiBucketAggregateBaseSignificantLongTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasesignificantstringtermsbucket.go b/typedapi/types/multibucketaggregatebasesignificantstringtermsbucket.go deleted file mode 100644 index 97885c4859..0000000000 --- a/typedapi/types/multibucketaggregatebasesignificantstringtermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseSignificantStringTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseSignificantStringTermsBucket struct { - Buckets BucketsSignificantStringTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseSignificantStringTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]SignificantStringTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []SignificantStringTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseSignificantStringTermsBucket returns a MultiBucketAggregateBaseSignificantStringTermsBucket. -func NewMultiBucketAggregateBaseSignificantStringTermsBucket() *MultiBucketAggregateBaseSignificantStringTermsBucket { - r := &MultiBucketAggregateBaseSignificantStringTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasestringraretermsbucket.go b/typedapi/types/multibucketaggregatebasestringraretermsbucket.go deleted file mode 100644 index 0e824b89e3..0000000000 --- a/typedapi/types/multibucketaggregatebasestringraretermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseStringRareTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseStringRareTermsBucket struct { - Buckets BucketsStringRareTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseStringRareTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]StringRareTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []StringRareTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseStringRareTermsBucket returns a MultiBucketAggregateBaseStringRareTermsBucket. -func NewMultiBucketAggregateBaseStringRareTermsBucket() *MultiBucketAggregateBaseStringRareTermsBucket { - r := &MultiBucketAggregateBaseStringRareTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasestringtermsbucket.go b/typedapi/types/multibucketaggregatebasestringtermsbucket.go deleted file mode 100644 index e9e1a171f0..0000000000 --- a/typedapi/types/multibucketaggregatebasestringtermsbucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseStringTermsBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseStringTermsBucket struct { - Buckets BucketsStringTermsBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseStringTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]StringTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []StringTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseStringTermsBucket returns a MultiBucketAggregateBaseStringTermsBucket. -func NewMultiBucketAggregateBaseStringTermsBucket() *MultiBucketAggregateBaseStringTermsBucket { - r := &MultiBucketAggregateBaseStringTermsBucket{} - - return r -} diff --git a/typedapi/types/multibucketaggregatebasevariablewidthhistogrambucket.go b/typedapi/types/multibucketaggregatebasevariablewidthhistogrambucket.go deleted file mode 100644 index 04201a1053..0000000000 --- a/typedapi/types/multibucketaggregatebasevariablewidthhistogrambucket.go +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// MultiBucketAggregateBaseVariableWidthHistogramBucket type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseVariableWidthHistogramBucket struct { - Buckets BucketsVariableWidthHistogramBucket `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *MultiBucketAggregateBaseVariableWidthHistogramBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]VariableWidthHistogramBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []VariableWidthHistogramBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewMultiBucketAggregateBaseVariableWidthHistogramBucket returns a MultiBucketAggregateBaseVariableWidthHistogramBucket. -func NewMultiBucketAggregateBaseVariableWidthHistogramBucket() *MultiBucketAggregateBaseVariableWidthHistogramBucket { - r := &MultiBucketAggregateBaseVariableWidthHistogramBucket{} - - return r -} diff --git a/typedapi/types/multigeterror.go b/typedapi/types/multigeterror.go index f3705b3391..110591c3d5 100644 --- a/typedapi/types/multigeterror.go +++ b/typedapi/types/multigeterror.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // MultiGetError type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/mget/types.ts#L62-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/mget/types.ts#L62-L66 type MultiGetError struct { Error ErrorCause `json:"error"` Id_ string `json:"_id"` @@ -78,3 +78,5 @@ func NewMultiGetError() *MultiGetError { return r } + +// false diff --git a/typedapi/types/multimatchquery.go b/typedapi/types/multimatchquery.go index 93bc99c371..664383ba71 100644 --- a/typedapi/types/multimatchquery.go +++ b/typedapi/types/multimatchquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -35,7 +35,7 @@ import ( // MultiMatchQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L456-L539 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L471-L557 type MultiMatchQuery struct { // Analyzer Analyzer used to convert the text in the query value into tokens. 
Analyzer *string `json:"analyzer,omitempty"` @@ -334,3 +334,13 @@ func NewMultiMatchQuery() *MultiMatchQuery { return r } + +// true + +type MultiMatchQueryVariant interface { + MultiMatchQueryCaster() *MultiMatchQuery +} + +func (s *MultiMatchQuery) MultiMatchQueryCaster() *MultiMatchQuery { + return s +} diff --git a/typedapi/types/multiplexertokenfilter.go b/typedapi/types/multiplexertokenfilter.go index f0de767299..a8af3abf93 100644 --- a/typedapi/types/multiplexertokenfilter.go +++ b/typedapi/types/multiplexertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // MultiplexerTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L262-L266 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L261-L265 type MultiplexerTokenFilter struct { Filters []string `json:"filters"` PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` @@ -99,3 +99,13 @@ func NewMultiplexerTokenFilter() *MultiplexerTokenFilter { return r } + +// true + +type MultiplexerTokenFilterVariant interface { + MultiplexerTokenFilterCaster() *MultiplexerTokenFilter +} + +func (s *MultiplexerTokenFilter) MultiplexerTokenFilterCaster() *MultiplexerTokenFilter { + return s +} diff --git a/typedapi/types/multisearchbody.go b/typedapi/types/multisearchbody.go index ec3dea0ce2..75a051431d 100644 --- a/typedapi/types/multisearchbody.go +++ b/typedapi/types/multisearchbody.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MultisearchBody type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/msearch/types.ts#L70-L201 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/msearch/types.ts#L70-L204 type MultisearchBody struct { Aggregations map[string]Aggregations `json:"aggregations,omitempty"` Collapse *FieldCollapse `json:"collapse,omitempty"` @@ -478,10 +478,20 @@ func (s *MultisearchBody) UnmarshalJSON(data []byte) error { // NewMultisearchBody returns a MultisearchBody. func NewMultisearchBody() *MultisearchBody { r := &MultisearchBody{ - Aggregations: make(map[string]Aggregations, 0), - Ext: make(map[string]json.RawMessage, 0), - ScriptFields: make(map[string]ScriptField, 0), + Aggregations: make(map[string]Aggregations), + Ext: make(map[string]json.RawMessage), + ScriptFields: make(map[string]ScriptField), } return r } + +// true + +type MultisearchBodyVariant interface { + MultisearchBodyCaster() *MultisearchBody +} + +func (s *MultisearchBody) MultisearchBodyCaster() *MultisearchBody { + return s +} diff --git a/typedapi/types/multisearchheader.go b/typedapi/types/multisearchheader.go index c00dead79f..3c64731e7c 100644 --- a/typedapi/types/multisearchheader.go +++ b/typedapi/types/multisearchheader.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // MultisearchHeader type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/msearch/types.ts#L52-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/msearch/types.ts#L52-L67 type MultisearchHeader struct { AllowNoIndices *bool `json:"allow_no_indices,omitempty"` AllowPartialSearchResults *bool `json:"allow_partial_search_results,omitempty"` @@ -213,3 +213,13 @@ func NewMultisearchHeader() *MultisearchHeader { return r } + +// true + +type MultisearchHeaderVariant interface { + MultisearchHeaderCaster() *MultisearchHeader +} + +func (s *MultisearchHeader) MultisearchHeaderCaster() *MultisearchHeader { + return s +} diff --git a/typedapi/types/multisearchitem.go b/typedapi/types/multisearchitem.go index fad0909cc2..4288c20ae1 100644 --- a/typedapi/types/multisearchitem.go +++ b/typedapi/types/multisearchitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,23 +32,47 @@ import ( // MultiSearchItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/msearch/types.ts#L213-L216 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/msearch/types.ts#L216-L219 type MultiSearchItem struct { - Aggregations map[string]Aggregate `json:"aggregations,omitempty"` - Clusters_ *ClusterStatistics `json:"_clusters,omitempty"` - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Hits HitsMetadata `json:"hits"` - MaxScore *Float64 `json:"max_score,omitempty"` - NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` - PitId *string `json:"pit_id,omitempty"` - Profile *Profile `json:"profile,omitempty"` - ScrollId_ *string `json:"_scroll_id,omitempty"` - Shards_ ShardStatistics `json:"_shards"` - Status *int `json:"status,omitempty"` - Suggest map[string][]Suggest `json:"suggest,omitempty"` - TerminatedEarly *bool `json:"terminated_early,omitempty"` - TimedOut bool `json:"timed_out"` - Took int64 `json:"took"` + Aggregations map[string]Aggregate `json:"aggregations,omitempty"` + Clusters_ *ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Hits The returned documents and metadata. + Hits HitsMetadata `json:"hits"` + MaxScore *Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *Profile `json:"profile,omitempty"` + // ScrollId_ The identifier for the search and its search context. + // You can use this scroll ID with the scroll API to retrieve the next batch of + // search results for the request. + // This property is returned only if the `scroll` query parameter is specified + // in the request. + ScrollId_ *string `json:"_scroll_id,omitempty"` + // Shards_ A count of shards used for the request. 
+ Shards_ ShardStatistics `json:"_shards"` + Status *int `json:"status,omitempty"` + Suggest map[string][]Suggest `json:"suggest,omitempty"` + TerminatedEarly *bool `json:"terminated_early,omitempty"` + // TimedOut If `true`, the request timed out before completion; returned results may be + // partial or empty. + TimedOut bool `json:"timed_out"` + // Took The number of milliseconds it took Elasticsearch to run the request. + // This value is calculated by measuring the time elapsed between receipt of a + // request on the coordinating node and the time at which the coordinating node + // is ready to send the response. + // It includes: + // + // * Communication time between the coordinating node and data nodes + // * Time the request spends in the search thread pool, queued for execution + // * Actual run time + // + // It does not include: + // + // * Time needed to send the request to Elasticsearch + // * Time needed to serialize the JSON response + // * Time needed to send the response to a client + Took int64 `json:"took"` } func (s *MultiSearchItem) UnmarshalJSON(data []byte) error { @@ -494,6 +518,13 @@ func (s *MultiSearchItem) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -790,10 +821,12 @@ func (s *MultiSearchItem) UnmarshalJSON(data []byte) error { // NewMultiSearchItem returns a MultiSearchItem. 
func NewMultiSearchItem() *MultiSearchItem { r := &MultiSearchItem{ - Aggregations: make(map[string]Aggregate, 0), - Fields: make(map[string]json.RawMessage, 0), - Suggest: make(map[string][]Suggest, 0), + Aggregations: make(map[string]Aggregate), + Fields: make(map[string]json.RawMessage), + Suggest: make(map[string][]Suggest), } return r } + +// false diff --git a/typedapi/types/multisearchresult.go b/typedapi/types/multisearchresult.go deleted file mode 100644 index 2d2ae69b32..0000000000 --- a/typedapi/types/multisearchresult.go +++ /dev/null @@ -1,128 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// MultiSearchResult type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/msearch/types.ts#L203-L206 -type MultiSearchResult struct { - Responses []MsearchResponseItem `json:"responses"` - Took int64 `json:"took"` -} - -func (s *MultiSearchResult) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "responses": - messageArray := []json.RawMessage{} - if err := dec.Decode(&messageArray); err != nil { - return fmt.Errorf("%s | %w", "Responses", err) - } - responses_field: - for _, message := range messageArray { - keyDec := json.NewDecoder(bytes.NewReader(message)) - for { - t, err := keyDec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return fmt.Errorf("%s | %w", "Responses", err) - } - - switch t { - - case "aggregations", "_clusters", "fields", "hits", "max_score", "num_reduce_phases", "pit_id", "profile", "_scroll_id", "_shards", "suggest", "terminated_early", "timed_out", "took": - o := NewMultiSearchItem() - localDec := json.NewDecoder(bytes.NewReader(message)) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Responses", err) - } - s.Responses = append(s.Responses, o) - continue responses_field - - case "error": - o := NewErrorResponseBase() - localDec := json.NewDecoder(bytes.NewReader(message)) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Responses", err) - } - s.Responses = append(s.Responses, o) - continue responses_field - - } - } - - var o any - localDec := json.NewDecoder(bytes.NewReader(message)) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Responses", err) - } - s.Responses = append(s.Responses, o) - } - - case "took": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := 
strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Took", err) - } - s.Took = value - case float64: - f := int64(v) - s.Took = f - } - - } - } - return nil -} - -// NewMultiSearchResult returns a MultiSearchResult. -func NewMultiSearchResult() *MultiSearchResult { - r := &MultiSearchResult{} - - return r -} diff --git a/typedapi/types/multitermlookup.go b/typedapi/types/multitermlookup.go index fcee79f57b..9fb44b0404 100644 --- a/typedapi/types/multitermlookup.go +++ b/typedapi/types/multitermlookup.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // MultiTermLookup type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L626-L636 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L643-L653 type MultiTermLookup struct { // Field A fields from which to retrieve terms. Field string `json:"field"` @@ -75,3 +75,13 @@ func NewMultiTermLookup() *MultiTermLookup { return r } + +// true + +type MultiTermLookupVariant interface { + MultiTermLookupCaster() *MultiTermLookup +} + +func (s *MultiTermLookup) MultiTermLookupCaster() *MultiTermLookup { + return s +} diff --git a/typedapi/types/multitermsaggregate.go b/typedapi/types/multitermsaggregate.go index 53276c62be..2d7cc513b0 100644 --- a/typedapi/types/multitermsaggregate.go +++ b/typedapi/types/multitermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MultiTermsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L463-L465 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L501-L506 type MultiTermsAggregate struct { Buckets BucketsMultiTermsBucket `json:"buckets"` DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` @@ -121,3 +121,5 @@ func NewMultiTermsAggregate() *MultiTermsAggregate { return r } + +// false diff --git a/typedapi/types/multitermsaggregation.go b/typedapi/types/multitermsaggregation.go index c99877e657..b2729c9c00 100644 --- a/typedapi/types/multitermsaggregation.go +++ b/typedapi/types/multitermsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // MultiTermsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L584-L624 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L601-L641 type MultiTermsAggregation struct { // CollectMode Specifies the strategy for data collection. 
CollectMode *termsaggregationcollectmode.TermsAggregationCollectMode `json:"collect_mode,omitempty"` @@ -191,3 +191,13 @@ func NewMultiTermsAggregation() *MultiTermsAggregation { return r } + +// true + +type MultiTermsAggregationVariant interface { + MultiTermsAggregationCaster() *MultiTermsAggregation +} + +func (s *MultiTermsAggregation) MultiTermsAggregationCaster() *MultiTermsAggregation { + return s +} diff --git a/typedapi/types/multitermsbucket.go b/typedapi/types/multitermsbucket.go index 2c1bbe3be4..c3f704bd44 100644 --- a/typedapi/types/multitermsbucket.go +++ b/typedapi/types/multitermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // MultiTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L467-L471 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L508-L512 type MultiTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -520,6 +520,13 @@ func (s *MultiTermsBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -653,8 +660,10 @@ func (s MultiTermsBucket) MarshalJSON() ([]byte, error) { // NewMultiTermsBucket returns a MultiTermsBucket. 
func NewMultiTermsBucket() *MultiTermsBucket { r := &MultiTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/murmur3hashproperty.go b/typedapi/types/murmur3hashproperty.go index f15ad4c80f..83180c0ffa 100644 --- a/typedapi/types/murmur3hashproperty.go +++ b/typedapi/types/murmur3hashproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // Murmur3HashProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/specialized.ts#L81-L83 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/specialized.ts#L90-L92 type Murmur3HashProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -41,11 +42,11 @@ type Murmur3HashProperty struct { Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { @@ -117,301 +118,313 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) 
} s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -460,318 +473,318 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] 
= oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); 
err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != 
nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() 
if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { 
- return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -786,6 +799,11 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -800,16 +818,16 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { func (s Murmur3HashProperty) MarshalJSON() ([]byte, error) { type innerMurmur3HashProperty Murmur3HashProperty tmp := innerMurmur3HashProperty{ - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "murmur3" @@ 
-820,10 +838,20 @@ func (s Murmur3HashProperty) MarshalJSON() ([]byte, error) { // NewMurmur3HashProperty returns a Murmur3HashProperty. func NewMurmur3HashProperty() *Murmur3HashProperty { r := &Murmur3HashProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type Murmur3HashPropertyVariant interface { + Murmur3HashPropertyCaster() *Murmur3HashProperty +} + +func (s *Murmur3HashProperty) Murmur3HashPropertyCaster() *Murmur3HashProperty { + return s +} diff --git a/typedapi/types/mutualinformationheuristic.go b/typedapi/types/mutualinformationheuristic.go index 566ab013a8..d5f21cc52e 100644 --- a/typedapi/types/mutualinformationheuristic.go +++ b/typedapi/types/mutualinformationheuristic.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // MutualInformationHeuristic type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L755-L764 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L800-L809 type MutualInformationHeuristic struct { // BackgroundIsSuperset Set to `false` if you defined a custom background filter that represents a // different set of documents that you want to compare to. 
@@ -95,3 +95,13 @@ func NewMutualInformationHeuristic() *MutualInformationHeuristic { return r } + +// true + +type MutualInformationHeuristicVariant interface { + MutualInformationHeuristicCaster() *MutualInformationHeuristic +} + +func (s *MutualInformationHeuristic) MutualInformationHeuristicCaster() *MutualInformationHeuristic { + return s +} diff --git a/typedapi/types/names.go b/typedapi/types/names.go index fe22fc4b40..89a8c42bfb 100644 --- a/typedapi/types/names.go +++ b/typedapi/types/names.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Names type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L81-L81 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L81-L81 type Names []string + +type NamesVariant interface { + NamesCaster() *Names +} diff --git a/typedapi/types/nativecode.go b/typedapi/types/nativecode.go index da71904cad..d1ba3b60e6 100644 --- a/typedapi/types/nativecode.go +++ b/typedapi/types/nativecode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NativeCode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/info/types.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/info/types.ts#L29-L32 type NativeCode struct { BuildHash string `json:"build_hash"` Version string `json:"version"` @@ -80,3 +80,5 @@ func NewNativeCode() *NativeCode { return r } + +// false diff --git a/typedapi/types/nativecodeinformation.go b/typedapi/types/nativecodeinformation.go index 9d60a1c067..663c9a9caf 100644 --- a/typedapi/types/nativecodeinformation.go +++ b/typedapi/types/nativecodeinformation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NativeCodeInformation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/info/types.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/info/types.ts#L29-L32 type NativeCodeInformation struct { BuildHash string `json:"build_hash"` Version string `json:"version"` @@ -80,3 +80,5 @@ func NewNativeCodeInformation() *NativeCodeInformation { return r } + +// false diff --git a/typedapi/types/nerinferenceoptions.go b/typedapi/types/nerinferenceoptions.go index ea7bd71754..952348be09 100644 --- a/typedapi/types/nerinferenceoptions.go +++ b/typedapi/types/nerinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NerInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L255-L264 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L242-L251 type NerInferenceOptions struct { // ClassificationLabels The token classification labels. Must be IOB formatted tags ClassificationLabels []string `json:"classification_labels,omitempty"` @@ -96,3 +96,13 @@ func NewNerInferenceOptions() *NerInferenceOptions { return r } + +// true + +type NerInferenceOptionsVariant interface { + NerInferenceOptionsCaster() *NerInferenceOptions +} + +func (s *NerInferenceOptions) NerInferenceOptionsCaster() *NerInferenceOptions { + return s +} diff --git a/typedapi/types/nerinferenceupdateoptions.go b/typedapi/types/nerinferenceupdateoptions.go index cdc7d18290..969289ea41 100644 --- a/typedapi/types/nerinferenceupdateoptions.go +++ b/typedapi/types/nerinferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NerInferenceUpdateOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L404-L409 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L392-L397 type NerInferenceUpdateOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. @@ -83,3 +83,13 @@ func NewNerInferenceUpdateOptions() *NerInferenceUpdateOptions { return r } + +// true + +type NerInferenceUpdateOptionsVariant interface { + NerInferenceUpdateOptionsCaster() *NerInferenceUpdateOptions +} + +func (s *NerInferenceUpdateOptions) NerInferenceUpdateOptionsCaster() *NerInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/nestedaggregate.go b/typedapi/types/nestedaggregate.go index c48d0f901c..05a4202426 100644 --- a/typedapi/types/nestedaggregate.go +++ b/typedapi/types/nestedaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // NestedAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L490-L491 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L534-L538 type NestedAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -491,6 +491,13 @@ func (s *NestedAggregate) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -624,8 +631,10 @@ func (s NestedAggregate) MarshalJSON() ([]byte, error) { // NewNestedAggregate returns a NestedAggregate. func NewNestedAggregate() *NestedAggregate { r := &NestedAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/nestedaggregation.go b/typedapi/types/nestedaggregation.go index c4f94a2356..d71e126799 100644 --- a/typedapi/types/nestedaggregation.go +++ b/typedapi/types/nestedaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // NestedAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L638-L643 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L655-L660 type NestedAggregation struct { // Path The path to the field of type `nested`. Path *string `json:"path,omitempty"` @@ -67,3 +67,13 @@ func NewNestedAggregation() *NestedAggregation { return r } + +// true + +type NestedAggregationVariant interface { + NestedAggregationCaster() *NestedAggregation +} + +func (s *NestedAggregation) NestedAggregationCaster() *NestedAggregation { + return s +} diff --git a/typedapi/types/nestedidentity.go b/typedapi/types/nestedidentity.go index ef68b0fc17..b18f4f0c7d 100644 --- a/typedapi/types/nestedidentity.go +++ b/typedapi/types/nestedidentity.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NestedIdentity type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/hits.ts#L89-L93 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/hits.ts#L90-L94 type NestedIdentity struct { Field string `json:"field"` Nested_ *NestedIdentity `json:"_nested,omitempty"` @@ -90,3 +90,5 @@ func NewNestedIdentity() *NestedIdentity { return r } + +// false diff --git a/typedapi/types/nestedproperty.go b/typedapi/types/nestedproperty.go index 64b3dc68fe..7c876a6319 100644 --- a/typedapi/types/nestedproperty.go +++ b/typedapi/types/nestedproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // NestedProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/complex.ts#L39-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/complex.ts#L39-L44 type NestedProperty struct { CopyTo []string `json:"copy_to,omitempty"` Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` @@ -43,11 +44,11 @@ type NestedProperty struct { IncludeInParent *bool `json:"include_in_parent,omitempty"` IncludeInRoot *bool `json:"include_in_root,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *NestedProperty) UnmarshalJSON(data []byte) error { @@ -119,301 +120,313 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -490,318 +503,318 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -816,6 +829,11 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -830,18 +848,18 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { func (s NestedProperty) MarshalJSON() ([]byte, error) { type innerNestedProperty NestedProperty tmp := innerNestedProperty{ - CopyTo: s.CopyTo, - Dynamic: s.Dynamic, - Enabled: s.Enabled, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - IncludeInParent: s.IncludeInParent, - IncludeInRoot: s.IncludeInRoot, - Meta: s.Meta, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + Enabled: s.Enabled, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IncludeInParent: s.IncludeInParent, + IncludeInRoot: s.IncludeInRoot, + Meta: s.Meta, + Properties: s.Properties, + Store: 
s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "nested" @@ -852,10 +870,20 @@ func (s NestedProperty) MarshalJSON() ([]byte, error) { // NewNestedProperty returns a NestedProperty. func NewNestedProperty() *NestedProperty { r := &NestedProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type NestedPropertyVariant interface { + NestedPropertyCaster() *NestedProperty +} + +func (s *NestedProperty) NestedPropertyCaster() *NestedProperty { + return s +} diff --git a/typedapi/types/nestedquery.go b/typedapi/types/nestedquery.go index 3e48512c74..5150e112bf 100644 --- a/typedapi/types/nestedquery.go +++ b/typedapi/types/nestedquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // NestedQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/joining.ts#L106-L130 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/joining.ts#L112-L139 type NestedQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -49,7 +49,7 @@ type NestedQuery struct { // Path Path to the nested object you wish to search. Path string `json:"path"` // Query Query you wish to run on nested objects in the path. 
- Query *Query `json:"query,omitempty"` + Query Query `json:"query"` QueryName_ *string `json:"_name,omitempty"` // ScoreMode How scores for matching child objects affect the root parent document’s // relevance score. @@ -144,3 +144,13 @@ func NewNestedQuery() *NestedQuery { return r } + +// true + +type NestedQueryVariant interface { + NestedQueryCaster() *NestedQuery +} + +func (s *NestedQuery) NestedQueryCaster() *NestedQuery { + return s +} diff --git a/typedapi/types/nestedsortvalue.go b/typedapi/types/nestedsortvalue.go index 7efb6f061d..204c471ebf 100644 --- a/typedapi/types/nestedsortvalue.go +++ b/typedapi/types/nestedsortvalue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NestedSortValue type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/sort.ts#L30-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/sort.ts#L29-L34 type NestedSortValue struct { Filter *Query `json:"filter,omitempty"` MaxChildren *int `json:"max_children,omitempty"` @@ -96,3 +96,13 @@ func NewNestedSortValue() *NestedSortValue { return r } + +// true + +type NestedSortValueVariant interface { + NestedSortValueCaster() *NestedSortValue +} + +func (s *NestedSortValue) NestedSortValueCaster() *NestedSortValue { + return s +} diff --git a/typedapi/types/networkdirectionprocessor.go b/typedapi/types/networkdirectionprocessor.go new file mode 100644 index 0000000000..dd6969db51 --- /dev/null +++ b/typedapi/types/networkdirectionprocessor.go @@ -0,0 +1,196 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NetworkDirectionProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1230-L1264 +type NetworkDirectionProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // DestinationIp Field containing the destination IP address. + DestinationIp *string `json:"destination_ip,omitempty"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If true and any required fields are missing, the processor quietly exits + // without modifying the document. 
+ IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // InternalNetworks List of internal networks. Supports IPv4 and IPv6 addresses and ranges in + // CIDR notation. Also supports the named ranges listed below. These may be + // constructed with template snippets. Must specify only one of + // internal_networks or internal_networks_field. + InternalNetworks []string `json:"internal_networks,omitempty"` + // InternalNetworksField A field on the given document to read the internal_networks configuration + // from. + InternalNetworksField *string `json:"internal_networks_field,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // SourceIp Field containing the source IP address. + SourceIp *string `json:"source_ip,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField Output field for the network direction. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *NetworkDirectionProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "destination_ip": + if err := dec.Decode(&s.DestinationIp); err != nil { + return fmt.Errorf("%s | %w", "DestinationIp", err) + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + 
switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "internal_networks": + if err := dec.Decode(&s.InternalNetworks); err != nil { + return fmt.Errorf("%s | %w", "InternalNetworks", err) + } + + case "internal_networks_field": + if err := dec.Decode(&s.InternalNetworksField); err != nil { + return fmt.Errorf("%s | %w", "InternalNetworksField", err) + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "source_ip": + if err := dec.Decode(&s.SourceIp); err != nil { + return fmt.Errorf("%s | %w", "SourceIp", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewNetworkDirectionProcessor returns a NetworkDirectionProcessor. 
+func NewNetworkDirectionProcessor() *NetworkDirectionProcessor { + r := &NetworkDirectionProcessor{} + + return r +} + +// true + +type NetworkDirectionProcessorVariant interface { + NetworkDirectionProcessorCaster() *NetworkDirectionProcessor +} + +func (s *NetworkDirectionProcessor) NetworkDirectionProcessorCaster() *NetworkDirectionProcessor { + return s +} diff --git a/typedapi/types/nevercondition.go b/typedapi/types/nevercondition.go index 5e16f41040..d6851fc053 100644 --- a/typedapi/types/nevercondition.go +++ b/typedapi/types/nevercondition.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NeverCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Conditions.ts#L72-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Conditions.ts#L72-L72 type NeverCondition struct { } @@ -32,3 +32,13 @@ func NewNeverCondition() *NeverCondition { return r } + +// true + +type NeverConditionVariant interface { + NeverConditionCaster() *NeverCondition +} + +func (s *NeverCondition) NeverConditionCaster() *NeverCondition { + return s +} diff --git a/typedapi/types/ngramtokenfilter.go b/typedapi/types/ngramtokenfilter.go index 2c147f0a1c..51f9081deb 100644 --- a/typedapi/types/ngramtokenfilter.go +++ b/typedapi/types/ngramtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NGramTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L268-L273 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L267-L272 type NGramTokenFilter struct { MaxGram *int `json:"max_gram,omitempty"` MinGram *int `json:"min_gram,omitempty"` @@ -129,3 +129,13 @@ func NewNGramTokenFilter() *NGramTokenFilter { return r } + +// true + +type NGramTokenFilterVariant interface { + NGramTokenFilterCaster() *NGramTokenFilter +} + +func (s *NGramTokenFilter) NGramTokenFilterCaster() *NGramTokenFilter { + return s +} diff --git a/typedapi/types/ngramtokenizer.go b/typedapi/types/ngramtokenizer.go index 412d0c5824..1b77d3b6c7 100644 --- a/typedapi/types/ngramtokenizer.go +++ b/typedapi/types/ngramtokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,12 +33,12 @@ import ( // NGramTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L39-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L84-L93 type NGramTokenizer struct { CustomTokenChars *string `json:"custom_token_chars,omitempty"` - MaxGram int `json:"max_gram"` - MinGram int `json:"min_gram"` - TokenChars []tokenchar.TokenChar `json:"token_chars"` + MaxGram *int `json:"max_gram,omitempty"` + MinGram *int `json:"min_gram,omitempty"` + TokenChars []tokenchar.TokenChar `json:"token_chars,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } @@ -80,10 +80,10 @@ func (s *NGramTokenizer) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "MaxGram", err) } - s.MaxGram = value + s.MaxGram = &value case float64: f := int(v) - s.MaxGram = f + s.MaxGram = &f } case "min_gram": @@ -96,10 +96,10 @@ func (s *NGramTokenizer) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "MinGram", err) } - s.MinGram = value + s.MinGram = &value case float64: f := int(v) - s.MinGram = f + s.MinGram = &f } case "token_chars": @@ -145,3 +145,13 @@ func NewNGramTokenizer() *NGramTokenizer { return r } + +// true + +type NGramTokenizerVariant interface { + NGramTokenizerCaster() *NGramTokenizer +} + +func (s *NGramTokenizer) NGramTokenizerCaster() *NGramTokenizer { + return s +} diff --git a/typedapi/types/nlpberttokenizationconfig.go b/typedapi/types/nlpberttokenizationconfig.go index f19d2bbe89..015cb19a18 100644 --- a/typedapi/types/nlpberttokenizationconfig.go +++ b/typedapi/types/nlpberttokenizationconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // NlpBertTokenizationConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L131-L158 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L161-L162 type NlpBertTokenizationConfig struct { // DoLowerCase Should the tokenizer lower case the text DoLowerCase *bool `json:"do_lower_case,omitempty"` @@ -140,3 +140,13 @@ func NewNlpBertTokenizationConfig() *NlpBertTokenizationConfig { return r } + +// true + +type NlpBertTokenizationConfigVariant interface { + NlpBertTokenizationConfigCaster() *NlpBertTokenizationConfig +} + +func (s *NlpBertTokenizationConfig) NlpBertTokenizationConfigCaster() *NlpBertTokenizationConfig { + return s +} diff --git a/typedapi/types/nlprobertatokenizationconfig.go b/typedapi/types/nlprobertatokenizationconfig.go index f5c650cea7..dcd81b98cd 100644 --- a/typedapi/types/nlprobertatokenizationconfig.go +++ b/typedapi/types/nlprobertatokenizationconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,10 +33,12 @@ import ( // NlpRobertaTokenizationConfig type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L160-L187 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L164-L171 type NlpRobertaTokenizationConfig struct { // AddPrefixSpace Should the tokenizer prefix input with a space character AddPrefixSpace *bool `json:"add_prefix_space,omitempty"` + // DoLowerCase Should the tokenizer lower case the text + DoLowerCase *bool `json:"do_lower_case,omitempty"` // MaxSequenceLength Maximum input sequence length for the model MaxSequenceLength *int `json:"max_sequence_length,omitempty"` // Span Tokenization spanning options. Special value of -1 indicates no spanning @@ -78,6 +80,20 @@ func (s *NlpRobertaTokenizationConfig) UnmarshalJSON(data []byte) error { s.AddPrefixSpace = &v } + case "do_lower_case": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DoLowerCase", err) + } + s.DoLowerCase = &value + case bool: + s.DoLowerCase = &v + } + case "max_sequence_length": var tmp any @@ -140,3 +156,13 @@ func NewNlpRobertaTokenizationConfig() *NlpRobertaTokenizationConfig { return r } + +// true + +type NlpRobertaTokenizationConfigVariant interface { + NlpRobertaTokenizationConfigCaster() *NlpRobertaTokenizationConfig +} + +func (s *NlpRobertaTokenizationConfig) NlpRobertaTokenizationConfigCaster() *NlpRobertaTokenizationConfig { + return s +} diff --git a/typedapi/types/nlptokenizationupdateoptions.go b/typedapi/types/nlptokenizationupdateoptions.go index 2052b98187..ab26f9faba 100644 --- a/typedapi/types/nlptokenizationupdateoptions.go +++ b/typedapi/types/nlptokenizationupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // NlpTokenizationUpdateOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L356-L361 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L344-L349 type NlpTokenizationUpdateOptions struct { // Span Span options to apply Span *int `json:"span,omitempty"` @@ -88,3 +88,13 @@ func NewNlpTokenizationUpdateOptions() *NlpTokenizationUpdateOptions { return r } + +// true + +type NlpTokenizationUpdateOptionsVariant interface { + NlpTokenizationUpdateOptionsCaster() *NlpTokenizationUpdateOptions +} + +func (s *NlpTokenizationUpdateOptions) NlpTokenizationUpdateOptionsCaster() *NlpTokenizationUpdateOptions { + return s +} diff --git a/typedapi/types/node.go b/typedapi/types/node.go index 5cc80eb02f..a9c6f4873b 100644 --- a/typedapi/types/node.go +++ b/typedapi/types/node.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Node type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/searchable_snapshots/cache_stats/Response.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/searchable_snapshots/cache_stats/Response.ts#L30-L32 type Node struct { SharedCache Shared `json:"shared_cache"` } @@ -33,3 +33,5 @@ func NewNode() *Node { return r } + +// false diff --git a/typedapi/types/nodeallocationexplanation.go b/typedapi/types/nodeallocationexplanation.go index 5d6f18cd27..d9efeb411c 100644 --- a/typedapi/types/nodeallocationexplanation.go +++ b/typedapi/types/nodeallocationexplanation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,17 +29,19 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/decision" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole" ) // NodeAllocationExplanation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/types.ts#L97-L106 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/types.ts#L103-L117 type NodeAllocationExplanation struct { Deciders []AllocationDecision `json:"deciders"` NodeAttributes map[string]string `json:"node_attributes"` NodeDecision decision.Decision `json:"node_decision"` NodeId string `json:"node_id"` NodeName string `json:"node_name"` + Roles []noderole.NodeRole `json:"roles"` Store *AllocationStore `json:"store,omitempty"` TransportAddress string `json:"transport_address"` WeightRanking int `json:"weight_ranking"` @@ -88,6 +90,11 @@ func (s *NodeAllocationExplanation) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "NodeName", err) } + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + case "store": if err := dec.Decode(&s.Store); err != nil { return fmt.Errorf("%s | %w", "Store", err) @@ -122,8 +129,10 @@ func (s *NodeAllocationExplanation) UnmarshalJSON(data []byte) error { // NewNodeAllocationExplanation returns a NodeAllocationExplanation. func NewNodeAllocationExplanation() *NodeAllocationExplanation { r := &NodeAllocationExplanation{ - NodeAttributes: make(map[string]string, 0), + NodeAttributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/nodeattributes.go b/typedapi/types/nodeattributes.go index 82f6d3d86d..47a1554ba8 100644 --- a/typedapi/types/nodeattributes.go +++ b/typedapi/types/nodeattributes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,25 +26,20 @@ import ( "errors" "fmt" "io" - "strconv" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole" ) // NodeAttributes type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Node.ts#L41-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Node.ts#L41-L52 type NodeAttributes struct { // Attributes Lists node attributes. Attributes map[string]string `json:"attributes"` // EphemeralId The ephemeral ID of the node. - EphemeralId string `json:"ephemeral_id"` - ExternalId *string `json:"external_id,omitempty"` + EphemeralId string `json:"ephemeral_id"` // Id The unique identifier of the node. Id *string `json:"id,omitempty"` // Name The unique identifier of the node. - Name string `json:"name"` - Roles []noderole.NodeRole `json:"roles,omitempty"` + Name string `json:"name"` // TransportAddress The host and port where transport HTTP connections are accepted. 
TransportAddress string `json:"transport_address"` } @@ -77,18 +72,6 @@ func (s *NodeAttributes) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "EphemeralId", err) } - case "external_id": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "ExternalId", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.ExternalId = &o - case "id": if err := dec.Decode(&s.Id); err != nil { return fmt.Errorf("%s | %w", "Id", err) @@ -99,11 +82,6 @@ func (s *NodeAttributes) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Name", err) } - case "roles": - if err := dec.Decode(&s.Roles); err != nil { - return fmt.Errorf("%s | %w", "Roles", err) - } - case "transport_address": if err := dec.Decode(&s.TransportAddress); err != nil { return fmt.Errorf("%s | %w", "TransportAddress", err) @@ -117,8 +95,10 @@ func (s *NodeAttributes) UnmarshalJSON(data []byte) error { // NewNodeAttributes returns a NodeAttributes. func NewNodeAttributes() *NodeAttributes { r := &NodeAttributes{ - Attributes: make(map[string]string, 0), + Attributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/nodeattributesrecord.go b/typedapi/types/nodeattributesrecord.go index bb975528ad..64e07538a9 100644 --- a/typedapi/types/nodeattributesrecord.go +++ b/typedapi/types/nodeattributesrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeAttributesRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/nodeattrs/types.ts#L20-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/nodeattrs/types.ts#L20-L55 type NodeAttributesRecord struct { // Attr The attribute name. Attr *string `json:"attr,omitempty"` @@ -173,3 +173,5 @@ func NewNodeAttributesRecord() *NodeAttributesRecord { return r } + +// false diff --git a/typedapi/types/nodebufferpool.go b/typedapi/types/nodebufferpool.go index e0840ac9c8..4bbbb08a05 100644 --- a/typedapi/types/nodebufferpool.go +++ b/typedapi/types/nodebufferpool.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeBufferPool type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L788-L809 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L859-L880 type NodeBufferPool struct { // Count Number of buffer pools. Count *int64 `json:"count,omitempty"` @@ -140,3 +140,5 @@ func NewNodeBufferPool() *NodeBufferPool { return r } + +// false diff --git a/typedapi/types/nodediskusage.go b/typedapi/types/nodediskusage.go index 79ccc30712..66f2ce81a5 100644 --- a/typedapi/types/nodediskusage.go +++ b/typedapi/types/nodediskusage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // NodeDiskUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/types.ts#L56-L60 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/types.ts#L57-L61 type NodeDiskUsage struct { LeastAvailable DiskUsage `json:"least_available"` MostAvailable DiskUsage `json:"most_available"` @@ -78,3 +78,5 @@ func NewNodeDiskUsage() *NodeDiskUsage { return r } + +// false diff --git a/typedapi/types/nodeids.go b/typedapi/types/nodeids.go index 31b4ab8e19..9d09d0c3e0 100644 --- a/typedapi/types/nodeids.go +++ b/typedapi/types/nodeids.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodeIds type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L64-L64 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L64-L64 type NodeIds []string diff --git a/typedapi/types/nodeinfo.go b/typedapi/types/nodeinfo.go index bf22df5833..9678021b1d 100644 --- a/typedapi/types/nodeinfo.go +++ b/typedapi/types/nodeinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // NodeInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L31-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L31-L67 type NodeInfo struct { Aggregations map[string]NodeInfoAggregation `json:"aggregations,omitempty"` Attributes map[string]string `json:"attributes"` @@ -254,10 +254,12 @@ func (s *NodeInfo) UnmarshalJSON(data []byte) error { // NewNodeInfo returns a NodeInfo. func NewNodeInfo() *NodeInfo { r := &NodeInfo{ - Aggregations: make(map[string]NodeInfoAggregation, 0), - Attributes: make(map[string]string, 0), - ThreadPool: make(map[string]NodeThreadPoolInfo, 0), + Aggregations: make(map[string]NodeInfoAggregation), + Attributes: make(map[string]string), + ThreadPool: make(map[string]NodeThreadPoolInfo), } return r } + +// false diff --git a/typedapi/types/nodeinfoaction.go b/typedapi/types/nodeinfoaction.go index 2d78ff07de..11ec4f1c53 100644 --- a/typedapi/types/nodeinfoaction.go +++ b/typedapi/types/nodeinfoaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L184-L186 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L184-L186 type NodeInfoAction struct { DestructiveRequiresName string `json:"destructive_requires_name"` } @@ -74,3 +74,5 @@ func NewNodeInfoAction() *NodeInfoAction { return r } + +// false diff --git a/typedapi/types/nodeinfoaggregation.go b/typedapi/types/nodeinfoaggregation.go index 95c1f9ec16..8d948d495b 100644 --- a/typedapi/types/nodeinfoaggregation.go +++ b/typedapi/types/nodeinfoaggregation.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodeInfoAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L235-L237 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L235-L237 type NodeInfoAggregation struct { Types []string `json:"types"` } @@ -33,3 +33,5 @@ func NewNodeInfoAggregation() *NodeInfoAggregation { return r } + +// false diff --git a/typedapi/types/nodeinfobootstrap.go b/typedapi/types/nodeinfobootstrap.go index 8c1a2e23c7..fc30949966 100644 --- a/typedapi/types/nodeinfobootstrap.go +++ b/typedapi/types/nodeinfobootstrap.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoBootstrap type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L204-L206 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L204-L206 type NodeInfoBootstrap struct { MemoryLock string `json:"memory_lock"` } @@ -74,3 +74,5 @@ func NewNodeInfoBootstrap() *NodeInfoBootstrap { return r } + +// false diff --git a/typedapi/types/nodeinfoclient.go b/typedapi/types/nodeinfoclient.go index 62e7c1ace0..2f53faa462 100644 --- a/typedapi/types/nodeinfoclient.go +++ b/typedapi/types/nodeinfoclient.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoClient type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L188-L190 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L188-L190 type NodeInfoClient struct { Type string `json:"type"` } @@ -74,3 +74,5 @@ func NewNodeInfoClient() *NodeInfoClient { return r } + +// false diff --git a/typedapi/types/nodeinfodiscover.go b/typedapi/types/nodeinfodiscover.go index d4d7126172..0e5b36d441 100644 --- a/typedapi/types/nodeinfodiscover.go +++ b/typedapi/types/nodeinfodiscover.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoDiscover type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L173-L182 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L173-L182 type NodeInfoDiscover struct { NodeInfoDiscover map[string]json.RawMessage `json:"-"` SeedHosts []string `json:"seed_hosts,omitempty"` @@ -126,8 +126,10 @@ func (s NodeInfoDiscover) MarshalJSON() ([]byte, error) { // NewNodeInfoDiscover returns a NodeInfoDiscover. 
func NewNodeInfoDiscover() *NodeInfoDiscover { r := &NodeInfoDiscover{ - NodeInfoDiscover: make(map[string]json.RawMessage, 0), + NodeInfoDiscover: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/nodeinfohttp.go b/typedapi/types/nodeinfohttp.go index 01704caa1e..ca9b6fec50 100644 --- a/typedapi/types/nodeinfohttp.go +++ b/typedapi/types/nodeinfohttp.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoHttp type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L306-L311 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L311-L316 type NodeInfoHttp struct { BoundAddress []string `json:"bound_address"` MaxContentLength ByteSize `json:"max_content_length,omitempty"` @@ -102,3 +102,5 @@ func NewNodeInfoHttp() *NodeInfoHttp { return r } + +// false diff --git a/typedapi/types/nodeinfoingest.go b/typedapi/types/nodeinfoingest.go index 9308739f21..5018859c59 100644 --- a/typedapi/types/nodeinfoingest.go +++ b/typedapi/types/nodeinfoingest.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodeInfoIngest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L227-L229 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L227-L229 type NodeInfoIngest struct { Processors []NodeInfoIngestProcessor `json:"processors"` } @@ -33,3 +33,5 @@ func NewNodeInfoIngest() *NodeInfoIngest { return r } + +// false diff --git a/typedapi/types/nodeinfoingestdownloader.go b/typedapi/types/nodeinfoingestdownloader.go index fac9c3e3a5..088e0112f5 100644 --- a/typedapi/types/nodeinfoingestdownloader.go +++ b/typedapi/types/nodeinfoingestdownloader.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoIngestDownloader type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L128-L130 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L128-L130 type NodeInfoIngestDownloader struct { Enabled string `json:"enabled"` } @@ -74,3 +74,5 @@ func NewNodeInfoIngestDownloader() *NodeInfoIngestDownloader { return r } + +// false diff --git a/typedapi/types/nodeinfoingestinfo.go b/typedapi/types/nodeinfoingestinfo.go index 0887beed22..0ff98f23fb 100644 --- a/typedapi/types/nodeinfoingestinfo.go +++ b/typedapi/types/nodeinfoingestinfo.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodeInfoIngestInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L124-L126 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L124-L126 type NodeInfoIngestInfo struct { Downloader NodeInfoIngestDownloader `json:"downloader"` } @@ -33,3 +33,5 @@ func NewNodeInfoIngestInfo() *NodeInfoIngestInfo { return r } + +// false diff --git a/typedapi/types/nodeinfoingestprocessor.go b/typedapi/types/nodeinfoingestprocessor.go index 6a7eef243b..e2984bcd63 100644 --- a/typedapi/types/nodeinfoingestprocessor.go +++ b/typedapi/types/nodeinfoingestprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoIngestProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L231-L233 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L231-L233 type NodeInfoIngestProcessor struct { Type string `json:"type"` } @@ -74,3 +74,5 @@ func NewNodeInfoIngestProcessor() *NodeInfoIngestProcessor { return r } + +// false diff --git a/typedapi/types/nodeinfojvmmemory.go b/typedapi/types/nodeinfojvmmemory.go index ff3f3273c0..cc228e1513 100644 --- a/typedapi/types/nodeinfojvmmemory.go +++ b/typedapi/types/nodeinfojvmmemory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoJvmMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L313-L324 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L318-L329 type NodeInfoJvmMemory struct { DirectMax ByteSize `json:"direct_max,omitempty"` DirectMaxInBytes int64 `json:"direct_max_in_bytes"` @@ -171,3 +171,5 @@ func NewNodeInfoJvmMemory() *NodeInfoJvmMemory { return r } + +// false diff --git a/typedapi/types/nodeinfomemory.go b/typedapi/types/nodeinfomemory.go index f5095dd170..9423c9571f 100644 --- a/typedapi/types/nodeinfomemory.go +++ b/typedapi/types/nodeinfomemory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L326-L329 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L331-L334 type NodeInfoMemory struct { Total string `json:"total"` TotalInBytes int64 `json:"total_in_bytes"` @@ -90,3 +90,5 @@ func NewNodeInfoMemory() *NodeInfoMemory { return r } + +// false diff --git a/typedapi/types/nodeinfonetwork.go b/typedapi/types/nodeinfonetwork.go index 94e6a6e856..c574fd90cd 100644 --- a/typedapi/types/nodeinfonetwork.go +++ b/typedapi/types/nodeinfonetwork.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoNetwork type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L331-L334 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L336-L339 type NodeInfoNetwork struct { PrimaryInterface NodeInfoNetworkInterface `json:"primary_interface"` RefreshInterval int `json:"refresh_interval"` @@ -84,3 +84,5 @@ func NewNodeInfoNetwork() *NodeInfoNetwork { return r } + +// false diff --git a/typedapi/types/nodeinfonetworkinterface.go b/typedapi/types/nodeinfonetworkinterface.go index 603b4f99ff..71d1d409a7 100644 --- a/typedapi/types/nodeinfonetworkinterface.go +++ b/typedapi/types/nodeinfonetworkinterface.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoNetworkInterface type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L336-L340 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L341-L345 type NodeInfoNetworkInterface struct { Address string `json:"address"` MacAddress string `json:"mac_address"` @@ -93,3 +93,5 @@ func NewNodeInfoNetworkInterface() *NodeInfoNetworkInterface { return r } + +// false diff --git a/typedapi/types/nodeinfooscpu.go b/typedapi/types/nodeinfooscpu.go index 57b09a526e..19fa8e17c7 100644 --- a/typedapi/types/nodeinfooscpu.go +++ b/typedapi/types/nodeinfooscpu.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoOSCPU type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L342-L351 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L347-L356 type NodeInfoOSCPU struct { CacheSize string `json:"cache_size"` CacheSizeInBytes int `json:"cache_size_in_bytes"` @@ -185,3 +185,5 @@ func NewNodeInfoOSCPU() *NodeInfoOSCPU { return r } + +// false diff --git a/typedapi/types/nodeinfopath.go b/typedapi/types/nodeinfopath.go index 1be1cc7965..96e61cdc4e 100644 --- a/typedapi/types/nodeinfopath.go +++ b/typedapi/types/nodeinfopath.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoPath type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L158-L163 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L158-L163 type NodeInfoPath struct { Data []string `json:"data,omitempty"` Home *string `json:"home,omitempty"` @@ -55,8 +55,19 @@ func (s *NodeInfoPath) UnmarshalJSON(data []byte) error { switch t { case "data": - if err := dec.Decode(&s.Data); err != nil { - return fmt.Errorf("%s | %w", "Data", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Data", err) + } + + s.Data = append(s.Data, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Data); err != nil { + return fmt.Errorf("%s | %w", "Data", err) + } } case "home": @@ -99,3 +110,5 @@ func NewNodeInfoPath() *NodeInfoPath { return r } + +// false diff --git a/typedapi/types/nodeinforepositories.go b/typedapi/types/nodeinforepositories.go index 6f6e5bed13..c1d0045e18 100644 --- a/typedapi/types/nodeinforepositories.go +++ b/typedapi/types/nodeinforepositories.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodeInfoRepositories type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L165-L167 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L165-L167 type NodeInfoRepositories struct { Url NodeInfoRepositoriesUrl `json:"url"` } @@ -33,3 +33,5 @@ func NewNodeInfoRepositories() *NodeInfoRepositories { return r } + +// false diff --git a/typedapi/types/nodeinforepositoriesurl.go b/typedapi/types/nodeinforepositoriesurl.go index deb6051cbe..340c5dee85 100644 --- a/typedapi/types/nodeinforepositoriesurl.go +++ b/typedapi/types/nodeinforepositoriesurl.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoRepositoriesUrl type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L169-L171 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L169-L171 type NodeInfoRepositoriesUrl struct { AllowedUrls string `json:"allowed_urls"` } @@ -74,3 +74,5 @@ func NewNodeInfoRepositoriesUrl() *NodeInfoRepositoriesUrl { return r } + +// false diff --git a/typedapi/types/nodeinfoscript.go b/typedapi/types/nodeinfoscript.go index 31c3f614a2..b2e2b6b73e 100644 --- a/typedapi/types/nodeinfoscript.go +++ b/typedapi/types/nodeinfoscript.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoScript type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L284-L287 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L289-L292 type NodeInfoScript struct { AllowedTypes string `json:"allowed_types"` DisableMaxCompilationsRate *string `json:"disable_max_compilations_rate,omitempty"` @@ -87,3 +87,5 @@ func NewNodeInfoScript() *NodeInfoScript { return r } + +// false diff --git a/typedapi/types/nodeinfosearch.go b/typedapi/types/nodeinfosearch.go index 853df2341a..bdab77520a 100644 --- a/typedapi/types/nodeinfosearch.go +++ b/typedapi/types/nodeinfosearch.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodeInfoSearch type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L289-L291 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L294-L296 type NodeInfoSearch struct { Remote NodeInfoSearchRemote `json:"remote"` } @@ -33,3 +33,5 @@ func NewNodeInfoSearch() *NodeInfoSearch { return r } + +// false diff --git a/typedapi/types/nodeinfosearchremote.go b/typedapi/types/nodeinfosearchremote.go index 683fe53149..31f33b5cd6 100644 --- a/typedapi/types/nodeinfosearchremote.go +++ b/typedapi/types/nodeinfosearchremote.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoSearchRemote type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L293-L295 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L298-L300 type NodeInfoSearchRemote struct { Connect string `json:"connect"` } @@ -74,3 +74,5 @@ func NewNodeInfoSearchRemote() *NodeInfoSearchRemote { return r } + +// false diff --git a/typedapi/types/nodeinfosettings.go b/typedapi/types/nodeinfosettings.go index 174e48536b..ac081921b2 100644 --- a/typedapi/types/nodeinfosettings.go +++ b/typedapi/types/nodeinfosettings.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodeInfoSettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L69-L85 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L69-L85 type NodeInfoSettings struct { Action *NodeInfoAction `json:"action,omitempty"` Bootstrap *NodeInfoBootstrap `json:"bootstrap,omitempty"` @@ -47,3 +47,5 @@ func NewNodeInfoSettings() *NodeInfoSettings { return r } + +// false diff --git a/typedapi/types/nodeinfosettingscluster.go b/typedapi/types/nodeinfosettingscluster.go index 6b1dc1e50b..5ae25a2ad2 100644 --- a/typedapi/types/nodeinfosettingscluster.go +++ b/typedapi/types/nodeinfosettingscluster.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // NodeInfoSettingsCluster type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L132-L142 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L132-L142 type NodeInfoSettingsCluster struct { DeprecationIndexing *DeprecationIndexing `json:"deprecation_indexing,omitempty"` Election NodeInfoSettingsClusterElection `json:"election"` @@ -90,3 +90,5 @@ func NewNodeInfoSettingsCluster() *NodeInfoSettingsCluster { return r } + +// false diff --git a/typedapi/types/nodeinfosettingsclusterelection.go b/typedapi/types/nodeinfosettingsclusterelection.go index 463c882aba..28cf422770 100644 --- a/typedapi/types/nodeinfosettingsclusterelection.go +++ b/typedapi/types/nodeinfosettingsclusterelection.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // NodeInfoSettingsClusterElection type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L148-L150 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L148-L150 type NodeInfoSettingsClusterElection struct { Strategy string `json:"strategy"` } @@ -66,3 +66,5 @@ func NewNodeInfoSettingsClusterElection() *NodeInfoSettingsClusterElection { return r } + +// false diff --git a/typedapi/types/nodeinfosettingshttp.go b/typedapi/types/nodeinfosettingshttp.go index 7dcdfd60f6..65f151bfcc 100644 --- a/typedapi/types/nodeinfosettingshttp.go +++ b/typedapi/types/nodeinfosettingshttp.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoSettingsHttp type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L192-L197 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L192-L197 type NodeInfoSettingsHttp struct { Compression string `json:"compression,omitempty"` Port string `json:"port,omitempty"` @@ -106,3 +106,5 @@ func NewNodeInfoSettingsHttp() *NodeInfoSettingsHttp { return r } + +// false diff --git a/typedapi/types/nodeinfosettingshttptype.go b/typedapi/types/nodeinfosettingshttptype.go index 418f674d0a..a7dfebcbe5 100644 --- a/typedapi/types/nodeinfosettingshttptype.go +++ b/typedapi/types/nodeinfosettingshttptype.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoSettingsHttpType type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L199-L202 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L199-L202 type NodeInfoSettingsHttpType struct { Default string `json:"default"` } @@ -86,3 +86,5 @@ func NewNodeInfoSettingsHttpType() *NodeInfoSettingsHttpType { return r } + +// false diff --git a/typedapi/types/nodeinfosettingsingest.go b/typedapi/types/nodeinfosettingsingest.go index ebc288ee85..3cad72fd05 100644 --- a/typedapi/types/nodeinfosettingsingest.go +++ b/typedapi/types/nodeinfosettingsingest.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodeInfoSettingsIngest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L87-L122 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L87-L122 type NodeInfoSettingsIngest struct { Append *NodeInfoIngestInfo `json:"append,omitempty"` Attachment *NodeInfoIngestInfo `json:"attachment,omitempty"` @@ -66,3 +66,5 @@ func NewNodeInfoSettingsIngest() *NodeInfoSettingsIngest { return r } + +// false diff --git a/typedapi/types/nodeinfosettingsnetwork.go b/typedapi/types/nodeinfosettingsnetwork.go index 6cda8c436f..aa5278ea94 100644 --- a/typedapi/types/nodeinfosettingsnetwork.go +++ b/typedapi/types/nodeinfosettingsnetwork.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,9 +30,9 @@ import ( // NodeInfoSettingsNetwork type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L223-L225 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L223-L225 type NodeInfoSettingsNetwork struct { - Host *string `json:"host,omitempty"` + Host []string `json:"host,omitempty"` } func (s *NodeInfoSettingsNetwork) UnmarshalJSON(data []byte) error { @@ -51,8 +51,19 @@ func (s *NodeInfoSettingsNetwork) UnmarshalJSON(data []byte) error { switch t { case "host": - if err := dec.Decode(&s.Host); err != nil { - return fmt.Errorf("%s | %w", "Host", err) + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + s.Host = append(s.Host, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } } } @@ -66,3 +77,5 @@ func NewNodeInfoSettingsNetwork() *NodeInfoSettingsNetwork { return r } + +// false diff --git a/typedapi/types/nodeinfosettingsnode.go b/typedapi/types/nodeinfosettingsnode.go index 8846044c08..592436c5d7 100644 --- a/typedapi/types/nodeinfosettingsnode.go +++ b/typedapi/types/nodeinfosettingsnode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoSettingsNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L152-L156 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L152-L156 type NodeInfoSettingsNode struct { Attr map[string]json.RawMessage `json:"attr"` MaxLocalStorageNodes *string `json:"max_local_storage_nodes,omitempty"` @@ -86,8 +86,10 @@ func (s *NodeInfoSettingsNode) UnmarshalJSON(data []byte) error { // NewNodeInfoSettingsNode returns a NodeInfoSettingsNode. func NewNodeInfoSettingsNode() *NodeInfoSettingsNode { r := &NodeInfoSettingsNode{ - Attr: make(map[string]json.RawMessage, 0), + Attr: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/nodeinfosettingstransport.go b/typedapi/types/nodeinfosettingstransport.go index 0e9835014c..855c3d9e52 100644 --- a/typedapi/types/nodeinfosettingstransport.go +++ b/typedapi/types/nodeinfosettingstransport.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoSettingsTransport type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L208-L212 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L208-L212 type NodeInfoSettingsTransport struct { Features *NodeInfoSettingsTransportFeatures `json:"features,omitempty"` Type NodeInfoSettingsTransportType `json:"type"` @@ -86,3 +86,5 @@ func NewNodeInfoSettingsTransport() *NodeInfoSettingsTransport { return r } + +// false diff --git a/typedapi/types/nodeinfosettingstransportfeatures.go b/typedapi/types/nodeinfosettingstransportfeatures.go index 2c8c1e875e..d5feaa42bb 100644 --- a/typedapi/types/nodeinfosettingstransportfeatures.go +++ b/typedapi/types/nodeinfosettingstransportfeatures.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoSettingsTransportFeatures type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L219-L221 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L219-L221 type NodeInfoSettingsTransportFeatures struct { XPack string `json:"x-pack"` } @@ -74,3 +74,5 @@ func NewNodeInfoSettingsTransportFeatures() *NodeInfoSettingsTransportFeatures { return r } + +// false diff --git a/typedapi/types/nodeinfosettingstransporttype.go b/typedapi/types/nodeinfosettingstransporttype.go index 487d3c3dc2..eebf4cc038 100644 --- a/typedapi/types/nodeinfosettingstransporttype.go +++ b/typedapi/types/nodeinfosettingstransporttype.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoSettingsTransportType type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L214-L217 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L214-L217 type NodeInfoSettingsTransportType struct { Default string `json:"default"` } @@ -86,3 +86,5 @@ func NewNodeInfoSettingsTransportType() *NodeInfoSettingsTransportType { return r } + +// false diff --git a/typedapi/types/nodeinfotransport.go b/typedapi/types/nodeinfotransport.go index 4d227c58d4..9a1803113c 100644 --- a/typedapi/types/nodeinfotransport.go +++ b/typedapi/types/nodeinfotransport.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoTransport type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L353-L357 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L358-L362 type NodeInfoTransport struct { BoundAddress []string `json:"bound_address"` Profiles map[string]string `json:"profiles"` @@ -86,8 +86,10 @@ func (s *NodeInfoTransport) UnmarshalJSON(data []byte) error { // NewNodeInfoTransport returns a NodeInfoTransport. func NewNodeInfoTransport() *NodeInfoTransport { r := &NodeInfoTransport{ - Profiles: make(map[string]string, 0), + Profiles: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/nodeinfoxpack.go b/typedapi/types/nodeinfoxpack.go index 646b060afa..91b1a8cf2a 100644 --- a/typedapi/types/nodeinfoxpack.go +++ b/typedapi/types/nodeinfoxpack.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,9 +26,10 @@ import ( // NodeInfoXpack type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L239-L243 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L239-L244 type NodeInfoXpack struct { License *NodeInfoXpackLicense `json:"license,omitempty"` + Ml *NodeInfoXpackMl `json:"ml,omitempty"` Notification map[string]json.RawMessage `json:"notification,omitempty"` Security NodeInfoXpackSecurity `json:"security"` } @@ -36,8 +37,10 @@ type NodeInfoXpack struct { // NewNodeInfoXpack returns a NodeInfoXpack. func NewNodeInfoXpack() *NodeInfoXpack { r := &NodeInfoXpack{ - Notification: make(map[string]json.RawMessage, 0), + Notification: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/nodeinfoxpacklicense.go b/typedapi/types/nodeinfoxpacklicense.go index c0f4c5d7d5..a805b6ec8b 100644 --- a/typedapi/types/nodeinfoxpacklicense.go +++ b/typedapi/types/nodeinfoxpacklicense.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodeInfoXpackLicense type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L276-L278 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L281-L283 type NodeInfoXpackLicense struct { SelfGenerated NodeInfoXpackLicenseType `json:"self_generated"` } @@ -33,3 +33,5 @@ func NewNodeInfoXpackLicense() *NodeInfoXpackLicense { return r } + +// false diff --git a/typedapi/types/nodeinfoxpacklicensetype.go b/typedapi/types/nodeinfoxpacklicensetype.go index 4b5a49689a..eec7fdca02 100644 --- a/typedapi/types/nodeinfoxpacklicensetype.go +++ b/typedapi/types/nodeinfoxpacklicensetype.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoXpackLicenseType type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L280-L282 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L285-L287 type NodeInfoXpackLicenseType struct { Type string `json:"type"` } @@ -74,3 +74,5 @@ func NewNodeInfoXpackLicenseType() *NodeInfoXpackLicenseType { return r } + +// false diff --git a/typedapi/types/deleteinferenceendpointresult.go b/typedapi/types/nodeinfoxpackml.go similarity index 55% rename from typedapi/types/deleteinferenceendpointresult.go rename to typedapi/types/nodeinfoxpackml.go index de4d718439..02bf304917 100644 --- a/typedapi/types/deleteinferenceendpointresult.go +++ b/typedapi/types/nodeinfoxpackml.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,17 +29,14 @@ import ( "strconv" ) -// DeleteInferenceEndpointResult type. +// NodeInfoXpackMl type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/_types/Results.ts#L91-L96 -type DeleteInferenceEndpointResult struct { - // Acknowledged For a successful response, this value is always true. On failure, an - // exception is returned instead. - Acknowledged bool `json:"acknowledged"` - Pipelines []string `json:"pipelines"` +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L253-L255 +type NodeInfoXpackMl struct { + UseAutoMachineMemoryPercent *bool `json:"use_auto_machine_memory_percent,omitempty"` } -func (s *DeleteInferenceEndpointResult) UnmarshalJSON(data []byte) error { +func (s *NodeInfoXpackMl) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -54,23 +51,18 @@ func (s *DeleteInferenceEndpointResult) UnmarshalJSON(data []byte) error { switch t { - case "acknowledged": + case "use_auto_machine_memory_percent": var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: value, err := strconv.ParseBool(v) if err != nil { - return fmt.Errorf("%s | %w", "Acknowledged", err) + return fmt.Errorf("%s | %w", "UseAutoMachineMemoryPercent", err) } - s.Acknowledged = value + s.UseAutoMachineMemoryPercent = &value case bool: - s.Acknowledged = v - } - - case "pipelines": - if err := dec.Decode(&s.Pipelines); err != nil { - return fmt.Errorf("%s | %w", "Pipelines", err) + s.UseAutoMachineMemoryPercent = &v } } @@ -78,9 +70,11 @@ func (s *DeleteInferenceEndpointResult) 
UnmarshalJSON(data []byte) error { return nil } -// NewDeleteInferenceEndpointResult returns a DeleteInferenceEndpointResult. -func NewDeleteInferenceEndpointResult() *DeleteInferenceEndpointResult { - r := &DeleteInferenceEndpointResult{} +// NewNodeInfoXpackMl returns a NodeInfoXpackMl. +func NewNodeInfoXpackMl() *NodeInfoXpackMl { + r := &NodeInfoXpackMl{} return r } + +// false diff --git a/typedapi/types/nodeinfoxpacksecurity.go b/typedapi/types/nodeinfoxpacksecurity.go index 22ee8b2851..d124a60764 100644 --- a/typedapi/types/nodeinfoxpacksecurity.go +++ b/typedapi/types/nodeinfoxpacksecurity.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,11 +31,11 @@ import ( // NodeInfoXpackSecurity type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L245-L250 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L246-L251 type NodeInfoXpackSecurity struct { Authc *NodeInfoXpackSecurityAuthc `json:"authc,omitempty"` Enabled string `json:"enabled"` - Http NodeInfoXpackSecuritySsl `json:"http"` + Http *NodeInfoXpackSecuritySsl `json:"http,omitempty"` Transport *NodeInfoXpackSecuritySsl `json:"transport,omitempty"` } @@ -92,3 +92,5 @@ func NewNodeInfoXpackSecurity() *NodeInfoXpackSecurity { return r } + +// false diff --git a/typedapi/types/nodeinfoxpacksecurityauthc.go b/typedapi/types/nodeinfoxpacksecurityauthc.go index 3c06b6e268..d11d3393a9 100644 --- a/typedapi/types/nodeinfoxpacksecurityauthc.go +++ b/typedapi/types/nodeinfoxpacksecurityauthc.go @@ -16,16 +16,16 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodeInfoXpackSecurityAuthc type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L256-L259 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L261-L264 type NodeInfoXpackSecurityAuthc struct { - Realms NodeInfoXpackSecurityAuthcRealms `json:"realms"` - Token NodeInfoXpackSecurityAuthcToken `json:"token"` + Realms *NodeInfoXpackSecurityAuthcRealms `json:"realms,omitempty"` + Token *NodeInfoXpackSecurityAuthcToken `json:"token,omitempty"` } // NewNodeInfoXpackSecurityAuthc returns a NodeInfoXpackSecurityAuthc. @@ -34,3 +34,5 @@ func NewNodeInfoXpackSecurityAuthc() *NodeInfoXpackSecurityAuthc { return r } + +// false diff --git a/typedapi/types/nodeinfoxpacksecurityauthcrealms.go b/typedapi/types/nodeinfoxpacksecurityauthcrealms.go index f4b7224ad7..e0e1dee071 100644 --- a/typedapi/types/nodeinfoxpacksecurityauthcrealms.go +++ b/typedapi/types/nodeinfoxpacksecurityauthcrealms.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodeInfoXpackSecurityAuthcRealms type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L261-L265 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L266-L270 type NodeInfoXpackSecurityAuthcRealms struct { File map[string]NodeInfoXpackSecurityAuthcRealmsStatus `json:"file,omitempty"` Native map[string]NodeInfoXpackSecurityAuthcRealmsStatus `json:"native,omitempty"` @@ -32,10 +32,12 @@ type NodeInfoXpackSecurityAuthcRealms struct { // NewNodeInfoXpackSecurityAuthcRealms returns a NodeInfoXpackSecurityAuthcRealms. func NewNodeInfoXpackSecurityAuthcRealms() *NodeInfoXpackSecurityAuthcRealms { r := &NodeInfoXpackSecurityAuthcRealms{ - File: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus, 0), - Native: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus, 0), - Pki: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus, 0), + File: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus), + Native: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus), + Pki: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus), } return r } + +// false diff --git a/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go b/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go index 661b63c7a2..8a5f86b1c9 100644 --- a/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go +++ b/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoXpackSecurityAuthcRealmsStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L271-L274 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L276-L279 type NodeInfoXpackSecurityAuthcRealmsStatus struct { Enabled *string `json:"enabled,omitempty"` Order string `json:"order"` @@ -87,3 +87,5 @@ func NewNodeInfoXpackSecurityAuthcRealmsStatus() *NodeInfoXpackSecurityAuthcReal return r } + +// false diff --git a/typedapi/types/nodeinfoxpacksecurityauthctoken.go b/typedapi/types/nodeinfoxpacksecurityauthctoken.go index b47742e5b0..1dfc78a5b4 100644 --- a/typedapi/types/nodeinfoxpacksecurityauthctoken.go +++ b/typedapi/types/nodeinfoxpacksecurityauthctoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeInfoXpackSecurityAuthcToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L267-L269 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L272-L274 type NodeInfoXpackSecurityAuthcToken struct { Enabled string `json:"enabled"` } @@ -74,3 +74,5 @@ func NewNodeInfoXpackSecurityAuthcToken() *NodeInfoXpackSecurityAuthcToken { return r } + +// false diff --git a/typedapi/types/nodeinfoxpacksecurityssl.go b/typedapi/types/nodeinfoxpacksecurityssl.go index 479cc55cfb..8615bd6305 100644 --- a/typedapi/types/nodeinfoxpacksecurityssl.go +++ b/typedapi/types/nodeinfoxpacksecurityssl.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodeInfoXpackSecuritySsl type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L252-L254 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L257-L259 type NodeInfoXpackSecuritySsl struct { Ssl map[string]string `json:"ssl"` } @@ -30,8 +30,10 @@ type NodeInfoXpackSecuritySsl struct { // NewNodeInfoXpackSecuritySsl returns a NodeInfoXpackSecuritySsl. func NewNodeInfoXpackSecuritySsl() *NodeInfoXpackSecuritySsl { r := &NodeInfoXpackSecuritySsl{ - Ssl: make(map[string]string, 0), + Ssl: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/nodejvminfo.go b/typedapi/types/nodejvminfo.go index 0c706797d2..72c7c58e88 100644 --- a/typedapi/types/nodejvminfo.go +++ b/typedapi/types/nodejvminfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeJvmInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L359-L373 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L364-L378 type NodeJvmInfo struct { GcCollectors []string `json:"gc_collectors"` InputArguments []string `json:"input_arguments"` @@ -167,3 +167,5 @@ func NewNodeJvmInfo() *NodeJvmInfo { return r } + +// false diff --git a/typedapi/types/nodeoperatingsysteminfo.go b/typedapi/types/nodeoperatingsysteminfo.go index 90b3fb808c..9314dbdc25 100644 --- a/typedapi/types/nodeoperatingsysteminfo.go +++ b/typedapi/types/nodeoperatingsysteminfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeOperatingSystemInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L375-L392 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L380-L397 type NodeOperatingSystemInfo struct { // AllocatedProcessors The number of processors actually used to calculate thread pool size. This // number can be set with the node.processors setting of a node and defaults to @@ -158,3 +158,5 @@ func NewNodeOperatingSystemInfo() *NodeOperatingSystemInfo { return r } + +// false diff --git a/typedapi/types/nodepackagingtype.go b/typedapi/types/nodepackagingtype.go index 1cd1f2f09a..b63caea072 100644 --- a/typedapi/types/nodepackagingtype.go +++ b/typedapi/types/nodepackagingtype.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodePackagingType type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L526-L539 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L526-L539 type NodePackagingType struct { // Count Number of selected nodes using the distribution flavor and file type. Count int `json:"count"` @@ -107,3 +107,5 @@ func NewNodePackagingType() *NodePackagingType { return r } + +// false diff --git a/typedapi/types/nodeprocessinfo.go b/typedapi/types/nodeprocessinfo.go index 6cb5b9719f..4c8696c5d0 100644 --- a/typedapi/types/nodeprocessinfo.go +++ b/typedapi/types/nodeprocessinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeProcessInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L394-L401 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L399-L406 type NodeProcessInfo struct { // Id Process identifier (PID) Id int64 `json:"id"` @@ -101,3 +101,5 @@ func NewNodeProcessInfo() *NodeProcessInfo { return r } + +// false diff --git a/typedapi/types/nodereloadresult.go b/typedapi/types/nodereloadresult.go index 1553d98c3c..78295a245c 100644 --- a/typedapi/types/nodereloadresult.go +++ b/typedapi/types/nodereloadresult.go @@ -16,14 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types -// NodeReloadResult holds the union for the following types: -// -// Stats -// NodeReloadError +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// NodeReloadResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/NodeReloadResult.ts#L29-L30 -type NodeReloadResult any +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/NodeReloadResult.ts#L23-L26 +type NodeReloadResult struct { + Name string `json:"name"` + ReloadException *ErrorCause `json:"reload_exception,omitempty"` +} + +func (s *NodeReloadResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "reload_exception": + if err := dec.Decode(&s.ReloadException); err != nil { + return fmt.Errorf("%s | %w", "ReloadException", err) + } + + } + } + return nil +} + +// NewNodeReloadResult returns a NodeReloadResult. +func NewNodeReloadResult() *NodeReloadResult { + r := &NodeReloadResult{} + + return r +} + +// false diff --git a/typedapi/types/nodescontext.go b/typedapi/types/nodescontext.go index 7bcf4ba116..5bc4d0460a 100644 --- a/typedapi/types/nodescontext.go +++ b/typedapi/types/nodescontext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodesContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L997-L1002 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L1068-L1073 type NodesContext struct { CacheEvictions *int64 `json:"cache_evictions,omitempty"` CompilationLimitTriggered *int64 `json:"compilation_limit_triggered,omitempty"` @@ -122,3 +122,5 @@ func NewNodesContext() *NodesContext { return r } + +// false diff --git a/typedapi/types/nodescredentials.go b/typedapi/types/nodescredentials.go index ca140f36c1..0fe13b8166 100644 --- a/typedapi/types/nodescredentials.go +++ b/typedapi/types/nodescredentials.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodesCredentials type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_service_credentials/types.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_service_credentials/types.ts#L23-L28 type NodesCredentials struct { // FileTokens File-backed tokens collected from all nodes FileTokens map[string]NodesCredentialsFileToken `json:"file_tokens"` @@ -33,8 +33,10 @@ type NodesCredentials struct { // NewNodesCredentials returns a NodesCredentials. 
func NewNodesCredentials() *NodesCredentials { r := &NodesCredentials{ - FileTokens: make(map[string]NodesCredentialsFileToken, 0), + FileTokens: make(map[string]NodesCredentialsFileToken), } return r } + +// false diff --git a/typedapi/types/nodescredentialsfiletoken.go b/typedapi/types/nodescredentialsfiletoken.go index 89dfb2edb0..9c57d60cda 100644 --- a/typedapi/types/nodescredentialsfiletoken.go +++ b/typedapi/types/nodescredentialsfiletoken.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodesCredentialsFileToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_service_credentials/types.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_service_credentials/types.ts#L30-L32 type NodesCredentialsFileToken struct { Nodes []string `json:"nodes"` } @@ -33,3 +33,5 @@ func NewNodesCredentialsFileToken() *NodesCredentialsFileToken { return r } + +// false diff --git a/typedapi/types/nodeshard.go b/typedapi/types/nodeshard.go index da72c9165e..2f0ed33852 100644 --- a/typedapi/types/nodeshard.go +++ b/typedapi/types/nodeshard.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // NodeShard type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Node.ts#L60-L71 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Node.ts#L54-L65 type NodeShard struct { AllocationId map[string]string `json:"allocation_id,omitempty"` Index string `json:"index"` @@ -146,9 +146,11 @@ func (s *NodeShard) UnmarshalJSON(data []byte) error { // NewNodeShard returns a NodeShard. func NewNodeShard() *NodeShard { r := &NodeShard{ - AllocationId: make(map[string]string, 0), - RecoverySource: make(map[string]string, 0), + AllocationId: make(map[string]string), + RecoverySource: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/nodeshutdownstatus.go b/typedapi/types/nodeshutdownstatus.go index e878f82365..70b101085a 100644 --- a/typedapi/types/nodeshutdownstatus.go +++ b/typedapi/types/nodeshutdownstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // NodeShutdownStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L29-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L29-L38 type NodeShutdownStatus struct { NodeId string `json:"node_id"` PersistentTasks PersistentTaskStatus `json:"persistent_tasks"` @@ -119,3 +119,5 @@ func NewNodeShutdownStatus() *NodeShutdownStatus { return r } + +// false diff --git a/typedapi/types/nodesindexingpressure.go b/typedapi/types/nodesindexingpressure.go index 2d5c901e0c..ce2be660a0 100644 --- a/typedapi/types/nodesindexingpressure.go +++ b/typedapi/types/nodesindexingpressure.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodesIndexingPressure type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L116-L121 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L116-L121 type NodesIndexingPressure struct { // Memory Contains statistics for memory consumption from indexing load. 
Memory *NodesIndexingPressureMemory `json:"memory,omitempty"` @@ -34,3 +34,5 @@ func NewNodesIndexingPressure() *NodesIndexingPressure { return r } + +// false diff --git a/typedapi/types/nodesindexingpressurememory.go b/typedapi/types/nodesindexingpressurememory.go index a506b3f66c..b85e9e17ec 100644 --- a/typedapi/types/nodesindexingpressurememory.go +++ b/typedapi/types/nodesindexingpressurememory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodesIndexingPressureMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L123-L142 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L123-L142 type NodesIndexingPressureMemory struct { // Current Contains statistics for current indexing load. Current *PressureMemory `json:"current,omitempty"` @@ -101,3 +101,5 @@ func NewNodesIndexingPressureMemory() *NodesIndexingPressureMemory { return r } + +// false diff --git a/typedapi/types/nodesingest.go b/typedapi/types/nodesingest.go index 6afbc628a9..db0d21f373 100644 --- a/typedapi/types/nodesingest.go +++ b/typedapi/types/nodesingest.go @@ -16,16 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // NodesIngest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L345-L354 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L345-L354 type NodesIngest struct { // Pipelines Contains statistics about ingest pipelines for the node. - Pipelines map[string]IngestTotal `json:"pipelines,omitempty"` + Pipelines map[string]IngestStats `json:"pipelines,omitempty"` // Total Contains statistics about ingest operations for the node. Total *IngestTotal `json:"total,omitempty"` } @@ -33,8 +33,10 @@ type NodesIngest struct { // NewNodesIngest returns a NodesIngest. func NewNodesIngest() *NodesIngest { r := &NodesIngest{ - Pipelines: make(map[string]IngestTotal, 0), + Pipelines: make(map[string]IngestStats), } return r } + +// false diff --git a/typedapi/types/nodesrecord.go b/typedapi/types/nodesrecord.go index bd7563e949..42b6b593ca 100644 --- a/typedapi/types/nodesrecord.go +++ b/typedapi/types/nodesrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/nodes/types.ts#L23-L542 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/nodes/types.ts#L23-L542 type NodesRecord struct { // Build The Elasticsearch build hash. 
Build *string `json:"build,omitempty"` @@ -1342,3 +1342,5 @@ func NewNodesRecord() *NodesRecord { return r } + +// false diff --git a/typedapi/types/nodestatistics.go b/typedapi/types/nodestatistics.go index efd0e53afb..4714aa06e8 100644 --- a/typedapi/types/nodestatistics.go +++ b/typedapi/types/nodestatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Node.ts#L28-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Node.ts#L28-L39 type NodeStatistics struct { // Failed Number of nodes that rejected the request or failed to respond. If this value // is not 0, a reason for the rejection or failure is included in the response. @@ -122,3 +122,5 @@ func NewNodeStatistics() *NodeStatistics { return r } + +// false diff --git a/typedapi/types/nodetasks.go b/typedapi/types/nodetasks.go index 87ad0772ea..59a90a6cb2 100644 --- a/typedapi/types/nodetasks.go +++ b/typedapi/types/nodetasks.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // NodeTasks type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/tasks/_types/TaskListResponseBase.ts#L49-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/tasks/_types/TaskListResponseBase.ts#L49-L57 type NodeTasks struct { Attributes map[string]string `json:"attributes,omitempty"` Host *string `json:"host,omitempty"` @@ -105,9 +105,11 @@ func (s *NodeTasks) UnmarshalJSON(data []byte) error { // NewNodeTasks returns a NodeTasks. func NewNodeTasks() *NodeTasks { r := &NodeTasks{ - Attributes: make(map[string]string, 0), - Tasks: make(map[string]TaskInfo, 0), + Attributes: make(map[string]string), + Tasks: make(map[string]TaskInfo), } return r } + +// false diff --git a/typedapi/types/nodethreadpoolinfo.go b/typedapi/types/nodethreadpoolinfo.go index 0a68896283..d770f25803 100644 --- a/typedapi/types/nodethreadpoolinfo.go +++ b/typedapi/types/nodethreadpoolinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // NodeThreadPoolInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/info/types.ts#L297-L304 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/info/types.ts#L302-L309 type NodeThreadPoolInfo struct { Core *int `json:"core,omitempty"` KeepAlive Duration `json:"keep_alive,omitempty"` @@ -148,3 +148,5 @@ func NewNodeThreadPoolInfo() *NodeThreadPoolInfo { return r } + +// false diff --git a/typedapi/types/nodeusage.go b/typedapi/types/nodeusage.go index a2027957ee..51b583e043 100644 --- a/typedapi/types/nodeusage.go +++ b/typedapi/types/nodeusage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // NodeUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/usage/types.ts#L25-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/usage/types.ts#L25-L30 type NodeUsage struct { Aggregations map[string]json.RawMessage `json:"aggregations"` RestActions map[string]int `json:"rest_actions"` @@ -87,9 +87,11 @@ func (s *NodeUsage) UnmarshalJSON(data []byte) error { // NewNodeUsage returns a NodeUsage. 
func NewNodeUsage() *NodeUsage { r := &NodeUsage{ - Aggregations: make(map[string]json.RawMessage, 0), - RestActions: make(map[string]int, 0), + Aggregations: make(map[string]json.RawMessage), + RestActions: make(map[string]int), } return r } + +// false diff --git a/typedapi/types/norianalyzer.go b/typedapi/types/norianalyzer.go index 516194c33a..88b2f036e4 100644 --- a/typedapi/types/norianalyzer.go +++ b/typedapi/types/norianalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // NoriAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/analyzers.ts#L66-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L303-L309 type NoriAnalyzer struct { DecompoundMode *noridecompoundmode.NoriDecompoundMode `json:"decompound_mode,omitempty"` Stoptags []string `json:"stoptags,omitempty"` @@ -116,3 +116,13 @@ func NewNoriAnalyzer() *NoriAnalyzer { return r } + +// true + +type NoriAnalyzerVariant interface { + NoriAnalyzerCaster() *NoriAnalyzer +} + +func (s *NoriAnalyzer) NoriAnalyzerCaster() *NoriAnalyzer { + return s +} diff --git a/typedapi/types/noripartofspeechtokenfilter.go b/typedapi/types/noripartofspeechtokenfilter.go index 086049652e..c35c659703 100644 --- a/typedapi/types/noripartofspeechtokenfilter.go +++ b/typedapi/types/noripartofspeechtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // NoriPartOfSpeechTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L275-L278 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L274-L277 type NoriPartOfSpeechTokenFilter struct { Stoptags []string `json:"stoptags,omitempty"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewNoriPartOfSpeechTokenFilter() *NoriPartOfSpeechTokenFilter { return r } + +// true + +type NoriPartOfSpeechTokenFilterVariant interface { + NoriPartOfSpeechTokenFilterCaster() *NoriPartOfSpeechTokenFilter +} + +func (s *NoriPartOfSpeechTokenFilter) NoriPartOfSpeechTokenFilterCaster() *NoriPartOfSpeechTokenFilter { + return s +} diff --git a/typedapi/types/noritokenizer.go b/typedapi/types/noritokenizer.go index 46eda6d1ee..f1c69e19b5 100644 --- a/typedapi/types/noritokenizer.go +++ b/typedapi/types/noritokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // NoriTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L81-L87 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/nori-plugin.ts#L28-L34 type NoriTokenizer struct { DecompoundMode *noridecompoundmode.NoriDecompoundMode `json:"decompound_mode,omitempty"` DiscardPunctuation *bool `json:"discard_punctuation,omitempty"` @@ -132,3 +132,13 @@ func NewNoriTokenizer() *NoriTokenizer { return r } + +// true + +type NoriTokenizerVariant interface { + NoriTokenizerCaster() *NoriTokenizer +} + +func (s *NoriTokenizer) NoriTokenizerCaster() *NoriTokenizer { + return s +} diff --git a/typedapi/types/normalizeaggregation.go b/typedapi/types/normalizeaggregation.go index 0a3deec3f4..f1bf541603 100644 --- a/typedapi/types/normalizeaggregation.go +++ b/typedapi/types/normalizeaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // NormalizeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L319-L324 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L351-L359 type NormalizeAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -101,3 +101,13 @@ func NewNormalizeAggregation() *NormalizeAggregation { return r } + +// true + +type NormalizeAggregationVariant interface { + NormalizeAggregationCaster() *NormalizeAggregation +} + +func (s *NormalizeAggregation) NormalizeAggregationCaster() *NormalizeAggregation { + return s +} diff --git a/typedapi/types/normalizer.go b/typedapi/types/normalizer.go index 69df1a1314..9bb46dc42e 100644 --- a/typedapi/types/normalizer.go +++ b/typedapi/types/normalizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // LowercaseNormalizer // CustomNormalizer // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/normalizers.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/normalizers.ts#L20-L24 type Normalizer any + +type NormalizerVariant interface { + NormalizerCaster() *Normalizer +} diff --git a/typedapi/types/norwegiananalyzer.go b/typedapi/types/norwegiananalyzer.go new file mode 100644 index 0000000000..e2dca44ae0 --- /dev/null +++ b/typedapi/types/norwegiananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NorwegianAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L228-L233 +type NorwegianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *NorwegianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case 
"stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s NorwegianAnalyzer) MarshalJSON() ([]byte, error) { + type innerNorwegianAnalyzer NorwegianAnalyzer + tmp := innerNorwegianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "norwegian" + + return json.Marshal(tmp) +} + +// NewNorwegianAnalyzer returns a NorwegianAnalyzer. +func NewNorwegianAnalyzer() *NorwegianAnalyzer { + r := &NorwegianAnalyzer{} + + return r +} + +// true + +type NorwegianAnalyzerVariant interface { + NorwegianAnalyzerCaster() *NorwegianAnalyzer +} + +func (s *NorwegianAnalyzer) NorwegianAnalyzerCaster() *NorwegianAnalyzer { + return s +} diff --git a/typedapi/types/numberrangequery.go b/typedapi/types/numberrangequery.go index 79040177b3..dd857893ae 100644 --- a/typedapi/types/numberrangequery.go +++ b/typedapi/types/numberrangequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // NumberRangeQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L157-L157 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L172-L172 type NumberRangeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -189,3 +189,13 @@ func NewNumberRangeQuery() *NumberRangeQuery { return r } + +// true + +type NumberRangeQueryVariant interface { + NumberRangeQueryCaster() *NumberRangeQuery +} + +func (s *NumberRangeQuery) NumberRangeQueryCaster() *NumberRangeQuery { + return s +} diff --git a/typedapi/types/numericdecayfunction.go b/typedapi/types/numericdecayfunction.go index 992c59bf43..1028a20291 100644 --- a/typedapi/types/numericdecayfunction.go +++ b/typedapi/types/numericdecayfunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,7 +29,7 @@ import ( // NumericDecayFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L195-L195 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L208-L208 type NumericDecayFunction struct { DecayFunctionBasedoubledouble map[string]DecayPlacementdoubledouble `json:"-"` // MultiValueMode Determines how the distance is calculated when a field used for computing the @@ -69,8 +69,18 @@ func (s NumericDecayFunction) MarshalJSON() ([]byte, error) { // NewNumericDecayFunction returns a NumericDecayFunction. 
func NewNumericDecayFunction() *NumericDecayFunction { r := &NumericDecayFunction{ - DecayFunctionBasedoubledouble: make(map[string]DecayPlacementdoubledouble, 0), + DecayFunctionBasedoubledouble: make(map[string]DecayPlacementdoubledouble), } return r } + +// true + +type NumericDecayFunctionVariant interface { + NumericDecayFunctionCaster() *NumericDecayFunction +} + +func (s *NumericDecayFunction) NumericDecayFunctionCaster() *NumericDecayFunction { + return s +} diff --git a/typedapi/types/numericfielddata.go b/typedapi/types/numericfielddata.go index 56c49cd2e8..2d034de12a 100644 --- a/typedapi/types/numericfielddata.go +++ b/typedapi/types/numericfielddata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // NumericFielddata type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/NumericFielddata.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/NumericFielddata.ts#L22-L24 type NumericFielddata struct { Format numericfielddataformat.NumericFielddataFormat `json:"format"` } @@ -37,3 +37,13 @@ func NewNumericFielddata() *NumericFielddata { return r } + +// true + +type NumericFielddataVariant interface { + NumericFielddataCaster() *NumericFielddata +} + +func (s *NumericFielddata) NumericFielddataCaster() *NumericFielddata { + return s +} diff --git a/typedapi/types/objectproperty.go b/typedapi/types/objectproperty.go index 4f3b50e240..9a5b19ad47 100644 --- a/typedapi/types/objectproperty.go +++ b/typedapi/types/objectproperty.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,13 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/subobjects" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // ObjectProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/complex.ts#L46-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/complex.ts#L46-L50 type ObjectProperty struct { CopyTo []string `json:"copy_to,omitempty"` Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` @@ -41,12 +43,12 @@ type ObjectProperty struct { Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Subobjects *bool `json:"subobjects,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + Subobjects *subobjects.Subobjects `json:"subobjects,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *ObjectProperty) UnmarshalJSON(data []byte) error { @@ -118,301 +120,313 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := 
NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -461,318 +475,318 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := 
NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | 
%w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); 
err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -788,17 +802,13 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { } case "subobjects": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "Subobjects", err) - } - s.Subobjects = &value - case bool: - s.Subobjects = &v + if err := dec.Decode(&s.Subobjects); err != nil { + return fmt.Errorf("%s | %w", "Subobjects", err) + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) } case "type": @@ -815,17 +825,17 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { func (s ObjectProperty) MarshalJSON() ([]byte, error) { type innerObjectProperty ObjectProperty tmp := innerObjectProperty{ - CopyTo: s.CopyTo, - Dynamic: s.Dynamic, - Enabled: s.Enabled, - Fields: s.Fields, - IgnoreAbove: 
s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Subobjects: s.Subobjects, - Type: s.Type, + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + Enabled: s.Enabled, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + Subobjects: s.Subobjects, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "object" @@ -836,10 +846,20 @@ func (s ObjectProperty) MarshalJSON() ([]byte, error) { // NewObjectProperty returns a ObjectProperty. func NewObjectProperty() *ObjectProperty { r := &ObjectProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type ObjectPropertyVariant interface { + ObjectPropertyCaster() *ObjectProperty +} + +func (s *ObjectProperty) ObjectPropertyCaster() *ObjectProperty { + return s +} diff --git a/typedapi/types/onehotencodingpreprocessor.go b/typedapi/types/onehotencodingpreprocessor.go index 17c7c0a0a9..8a36c4341b 100644 --- a/typedapi/types/onehotencodingpreprocessor.go +++ b/typedapi/types/onehotencodingpreprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // OneHotEncodingPreprocessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/types.ts#L44-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/types.ts#L44-L47 type OneHotEncodingPreprocessor struct { Field string `json:"field"` HotMap map[string]string `json:"hot_map"` @@ -80,8 +80,18 @@ func (s *OneHotEncodingPreprocessor) UnmarshalJSON(data []byte) error { // NewOneHotEncodingPreprocessor returns a OneHotEncodingPreprocessor. func NewOneHotEncodingPreprocessor() *OneHotEncodingPreprocessor { r := &OneHotEncodingPreprocessor{ - HotMap: make(map[string]string, 0), + HotMap: make(map[string]string), } return r } + +// true + +type OneHotEncodingPreprocessorVariant interface { + OneHotEncodingPreprocessorCaster() *OneHotEncodingPreprocessor +} + +func (s *OneHotEncodingPreprocessor) OneHotEncodingPreprocessorCaster() *OneHotEncodingPreprocessor { + return s +} diff --git a/typedapi/types/openaiservicesettings.go b/typedapi/types/openaiservicesettings.go new file mode 100644 index 0000000000..9734ba5e80 --- /dev/null +++ b/typedapi/types/openaiservicesettings.go @@ -0,0 +1,175 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// OpenAIServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/put_openai/PutOpenAiRequest.ts#L94-L136 +type OpenAIServiceSettings struct { + // ApiKey A valid API key of your OpenAI account. + // You can find your OpenAI API keys in your OpenAI account under the API keys + // section. + // + // IMPORTANT: You need to provide the API key only once, during the inference + // model creation. + // The get inference endpoint API does not retrieve your API key. + // After creating the inference model, you cannot change the associated API key. + // If you want to use a different API key, delete the inference model and + // recreate it with the same name and the updated API key. + ApiKey string `json:"api_key"` + // Dimensions The number of dimensions the resulting output embeddings should have. + // It is supported only in `text-embedding-3` and later models. + // If it is not set, the OpenAI defined default for the model is used. + Dimensions *int `json:"dimensions,omitempty"` + // ModelId The name of the model to use for the inference task. + // Refer to the OpenAI documentation for the list of available text embedding + // models. + ModelId string `json:"model_id"` + // OrganizationId The unique identifier for your organization. + // You can find the Organization ID in your OpenAI account under *Settings > + // Organizations*. + OrganizationId *string `json:"organization_id,omitempty"` + // RateLimit This setting helps to minimize the number of rate limit errors returned from + // OpenAI. 
+ // The `openai` service sets a default number of requests allowed per minute + // depending on the task type. + // For `text_embedding`, it is set to `3000`. + // For `completion`, it is set to `500`. + RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` + // Url The URL endpoint to use for the requests. + // It can be changed for testing purposes. + Url *string `json:"url,omitempty"` +} + +func (s *OpenAIServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "dimensions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Dimensions", err) + } + s.Dimensions = &value + case float64: + f := int(v) + s.Dimensions = &f + } + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "organization_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "OrganizationId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OrganizationId = &o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Url", err) + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = &o + + } + } + return nil +} + +// NewOpenAIServiceSettings returns a OpenAIServiceSettings. +func NewOpenAIServiceSettings() *OpenAIServiceSettings { + r := &OpenAIServiceSettings{} + + return r +} + +// true + +type OpenAIServiceSettingsVariant interface { + OpenAIServiceSettingsCaster() *OpenAIServiceSettings +} + +func (s *OpenAIServiceSettings) OpenAIServiceSettingsCaster() *OpenAIServiceSettings { + return s +} diff --git a/typedapi/types/openaitasksettings.go b/typedapi/types/openaitasksettings.go new file mode 100644 index 0000000000..a0249d1bc5 --- /dev/null +++ b/typedapi/types/openaitasksettings.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// OpenAITaskSettings type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/put_openai/PutOpenAiRequest.ts#L138-L144 +type OpenAITaskSettings struct { + // User For a `completion` or `text_embedding` task, specify the user issuing the + // request. + // This informaiton can be used for abuse detection. + User *string `json:"user,omitempty"` +} + +func (s *OpenAITaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "user": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "User", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.User = &o + + } + } + return nil +} + +// NewOpenAITaskSettings returns a OpenAITaskSettings. +func NewOpenAITaskSettings() *OpenAITaskSettings { + r := &OpenAITaskSettings{} + + return r +} + +// true + +type OpenAITaskSettingsVariant interface { + OpenAITaskSettingsCaster() *OpenAITaskSettings +} + +func (s *OpenAITaskSettings) OpenAITaskSettingsCaster() *OpenAITaskSettings { + return s +} diff --git a/typedapi/types/operatingsystem.go b/typedapi/types/operatingsystem.go index cfa53ca947..cea8172e23 100644 --- a/typedapi/types/operatingsystem.go +++ b/typedapi/types/operatingsystem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // OperatingSystem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L945-L951 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L1016-L1022 type OperatingSystem struct { Cgroup *Cgroup `json:"cgroup,omitempty"` Cpu *Cpu `json:"cpu,omitempty"` @@ -101,3 +101,5 @@ func NewOperatingSystem() *OperatingSystem { return r } + +// false diff --git a/typedapi/types/operatingsystemmemoryinfo.go b/typedapi/types/operatingsystemmemoryinfo.go index 7bdbd73f2a..6376484142 100644 --- a/typedapi/types/operatingsystemmemoryinfo.go +++ b/typedapi/types/operatingsystemmemoryinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // OperatingSystemMemoryInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/stats/types.ts#L541-L568 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/stats/types.ts#L541-L568 type OperatingSystemMemoryInfo struct { // AdjustedTotalInBytes Total amount, in bytes, of memory across all selected nodes, but using the // value specified using the `es.total_memory_bytes` system property instead of @@ -167,3 +167,5 @@ func NewOperatingSystemMemoryInfo() *OperatingSystemMemoryInfo { return r } + +// false diff --git a/typedapi/types/operationcontainer.go b/typedapi/types/operationcontainer.go index aa640200bc..d6539491b2 100644 --- a/typedapi/types/operationcontainer.go +++ b/typedapi/types/operationcontainer.go @@ -16,31 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // OperationContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/bulk/types.ts#L145-L167 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/bulk/types.ts#L158-L180 type OperationContainer struct { - // Create Indexes the specified document if it does not already exist. + AdditionalOperationContainerProperty map[string]json.RawMessage `json:"-"` + // Create Index the specified document if it does not already exist. // The following line must contain the source data to be indexed. Create *CreateOperation `json:"create,omitempty"` - // Delete Removes the specified document from the index. 
+ // Delete Remove the specified document from the index. Delete *DeleteOperation `json:"delete,omitempty"` - // Index Indexes the specified document. - // If the document exists, replaces the document and increments the version. + // Index Index the specified document. + // If the document exists, it replaces the document and increments the version. // The following line must contain the source data to be indexed. Index *IndexOperation `json:"index,omitempty"` - // Update Performs a partial document update. + // Update Perform a partial document update. // The following line must contain the partial document and update options. Update *UpdateOperation `json:"update,omitempty"` } +// MarshalJSON overrides marshalling for types with additional properties +func (s OperationContainer) MarshalJSON() ([]byte, error) { + type opt OperationContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalOperationContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalOperationContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewOperationContainer returns a OperationContainer. 
func NewOperationContainer() *OperationContainer { - r := &OperationContainer{} + r := &OperationContainer{ + AdditionalOperationContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type OperationContainerVariant interface { + OperationContainerCaster() *OperationContainer +} + +func (s *OperationContainer) OperationContainerCaster() *OperationContainer { + return s +} diff --git a/typedapi/types/outlierdetectionparameters.go b/typedapi/types/outlierdetectionparameters.go index d5982ac37d..1d965e33b1 100644 --- a/typedapi/types/outlierdetectionparameters.go +++ b/typedapi/types/outlierdetectionparameters.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // OutlierDetectionParameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L527-L561 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L528-L562 type OutlierDetectionParameters struct { // ComputeFeatureInfluence Specifies whether the feature influence calculation is enabled. ComputeFeatureInfluence *bool `json:"compute_feature_influence,omitempty"` @@ -178,3 +178,5 @@ func NewOutlierDetectionParameters() *OutlierDetectionParameters { return r } + +// false diff --git a/typedapi/types/overallbucket.go b/typedapi/types/overallbucket.go index 91308c0220..0034fc5859 100644 --- a/typedapi/types/overallbucket.go +++ b/typedapi/types/overallbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // OverallBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Bucket.ts#L130-L145 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Bucket.ts#L129-L144 type OverallBucket struct { // BucketSpan The length of the bucket in seconds. Matches the job with the longest // bucket_span value. @@ -48,7 +48,7 @@ type OverallBucket struct { // Timestamp The start time of the bucket for which these results were calculated. Timestamp int64 `json:"timestamp"` // TimestampString The start time of the bucket for which these results were calculated. - TimestampString DateTime `json:"timestamp_string"` + TimestampString DateTime `json:"timestamp_string,omitempty"` } func (s *OverallBucket) UnmarshalJSON(data []byte) error { @@ -139,3 +139,5 @@ func NewOverallBucket() *OverallBucket { return r } + +// false diff --git a/typedapi/types/overallbucketjob.go b/typedapi/types/overallbucketjob.go index 365a4ac89a..2b0873b6aa 100644 --- a/typedapi/types/overallbucketjob.go +++ b/typedapi/types/overallbucketjob.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // OverallBucketJob type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Bucket.ts#L146-L149 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Bucket.ts#L145-L148 type OverallBucketJob struct { JobId string `json:"job_id"` MaxAnomalyScore Float64 `json:"max_anomaly_score"` @@ -84,3 +84,5 @@ func NewOverallBucketJob() *OverallBucketJob { return r } + +// false diff --git a/typedapi/types/overlapping.go b/typedapi/types/overlapping.go index 1aa3c8701b..772081d86b 100644 --- a/typedapi/types/overlapping.go +++ b/typedapi/types/overlapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // Overlapping type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L39-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L39-L42 type Overlapping struct { IndexPatterns []string `json:"index_patterns"` Name string `json:"name"` @@ -72,3 +72,5 @@ func NewOverlapping() *Overlapping { return r } + +// false diff --git a/typedapi/types/page.go b/typedapi/types/page.go index b9c2d512ba..e856e54fc7 100644 --- a/typedapi/types/page.go +++ b/typedapi/types/page.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Page type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Page.ts#L22-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Page.ts#L22-L33 type Page struct { // From Skips the specified number of items. From *int `json:"from,omitempty"` @@ -97,3 +97,13 @@ func NewPage() *Page { return r } + +// true + +type PageVariant interface { + PageCaster() *Page +} + +func (s *Page) PageCaster() *Page { + return s +} diff --git a/typedapi/types/pagerdutyaction.go b/typedapi/types/pagerdutyaction.go index 29267124b5..43ab45503b 100644 --- a/typedapi/types/pagerdutyaction.go +++ b/typedapi/types/pagerdutyaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // PagerDutyAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L54-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L54-L54 type PagerDutyAction struct { Account *string `json:"account,omitempty"` AttachPayload bool `json:"attach_payload"` @@ -161,3 +161,13 @@ func NewPagerDutyAction() *PagerDutyAction { return r } + +// true + +type PagerDutyActionVariant interface { + PagerDutyActionCaster() *PagerDutyAction +} + +func (s *PagerDutyAction) PagerDutyActionCaster() *PagerDutyAction { + return s +} diff --git a/typedapi/types/pagerdutycontext.go b/typedapi/types/pagerdutycontext.go index ee5ee1f528..95180066d2 100644 --- a/typedapi/types/pagerdutycontext.go +++ b/typedapi/types/pagerdutycontext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // PagerDutyContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L61-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L61-L65 type PagerDutyContext struct { Href *string `json:"href,omitempty"` Src *string `json:"src,omitempty"` @@ -95,3 +95,13 @@ func NewPagerDutyContext() *PagerDutyContext { return r } + +// true + +type PagerDutyContextVariant interface { + PagerDutyContextCaster() *PagerDutyContext +} + +func (s *PagerDutyContext) PagerDutyContextCaster() *PagerDutyContext { + return s +} diff --git a/typedapi/types/pagerdutyevent.go b/typedapi/types/pagerdutyevent.go index 4d8679490a..66db9c164c 100644 --- a/typedapi/types/pagerdutyevent.go +++ b/typedapi/types/pagerdutyevent.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // PagerDutyEvent type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L40-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L40-L52 type PagerDutyEvent struct { Account *string `json:"account,omitempty"` AttachPayload bool `json:"attach_payload"` @@ -161,3 +161,5 @@ func NewPagerDutyEvent() *PagerDutyEvent { return r } + +// false diff --git a/typedapi/types/pagerdutyeventproxy.go b/typedapi/types/pagerdutyeventproxy.go index a2b9b4fd9b..1e0026a7dd 100644 --- a/typedapi/types/pagerdutyeventproxy.go +++ b/typedapi/types/pagerdutyeventproxy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PagerDutyEventProxy type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L56-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L56-L59 type PagerDutyEventProxy struct { Host *string `json:"host,omitempty"` Port *int `json:"port,omitempty"` @@ -84,3 +84,13 @@ func NewPagerDutyEventProxy() *PagerDutyEventProxy { return r } + +// true + +type PagerDutyEventProxyVariant interface { + PagerDutyEventProxyCaster() *PagerDutyEventProxy +} + +func (s *PagerDutyEventProxy) PagerDutyEventProxyCaster() *PagerDutyEventProxy { + return s +} diff --git a/typedapi/types/pagerdutyresult.go b/typedapi/types/pagerdutyresult.go index 85eb5acdcf..2b3f152f1e 100644 --- a/typedapi/types/pagerdutyresult.go +++ b/typedapi/types/pagerdutyresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PagerDutyResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L78-L83 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L78-L83 type PagerDutyResult struct { Event PagerDutyEvent `json:"event"` Reason *string `json:"reason,omitempty"` @@ -92,3 +92,5 @@ func NewPagerDutyResult() *PagerDutyResult { return r } + +// false diff --git a/typedapi/types/painlesscontextsetup.go b/typedapi/types/painlesscontextsetup.go index f0dde19e8f..6750c6bf08 100644 --- a/typedapi/types/painlesscontextsetup.go +++ b/typedapi/types/painlesscontextsetup.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,16 +30,25 @@ import ( // PainlessContextSetup type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/scripts_painless_execute/types.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/scripts_painless_execute/types.ts#L27-L46 type PainlessContextSetup struct { - // Document Document that’s temporarily indexed in-memory and accessible from the script. + // Document Document that's temporarily indexed in-memory and accessible from the script. Document json.RawMessage `json:"document,omitempty"` - // Index Index containing a mapping that’s compatible with the indexed document. + // Index Index containing a mapping that's compatible with the indexed document. // You may specify a remote index by prefixing the index with the remote cluster // alias. 
+ // For example, `remote1:my_index` indicates that you want to run the painless + // script against the "my_index" index on the "remote1" cluster. + // This request will be forwarded to the "remote1" cluster if you have + // configured a connection to that remote cluster. + // + // NOTE: Wildcards are not accepted in the index expression for this endpoint. + // The expression `*:myindex` will return the error "No such remote cluster" and + // the expression `logs*` or `remote1:logs*` will return the error "index not + // found". Index string `json:"index"` // Query Use this parameter to specify a query for computing a score. - Query Query `json:"query"` + Query *Query `json:"query,omitempty"` } func (s *PainlessContextSetup) UnmarshalJSON(data []byte) error { @@ -83,3 +92,13 @@ func NewPainlessContextSetup() *PainlessContextSetup { return r } + +// true + +type PainlessContextSetupVariant interface { + PainlessContextSetupCaster() *PainlessContextSetup +} + +func (s *PainlessContextSetup) PainlessContextSetupCaster() *PainlessContextSetup { + return s +} diff --git a/typedapi/types/parentaggregate.go b/typedapi/types/parentaggregate.go index 77f6a86e7c..6578144ae3 100644 --- a/typedapi/types/parentaggregate.go +++ b/typedapi/types/parentaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // ParentAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L786-L787 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L894-L898 type ParentAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -491,6 +491,13 @@ func (s *ParentAggregate) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -624,8 +631,10 @@ func (s ParentAggregate) MarshalJSON() ([]byte, error) { // NewParentAggregate returns a ParentAggregate. func NewParentAggregate() *ParentAggregate { r := &ParentAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/parentaggregation.go b/typedapi/types/parentaggregation.go index 3877ecf63d..00a88cdd82 100644 --- a/typedapi/types/parentaggregation.go +++ b/typedapi/types/parentaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ParentAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L645-L650 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L662-L667 type ParentAggregation struct { // Type The child type that should be selected. Type *string `json:"type,omitempty"` @@ -67,3 +67,13 @@ func NewParentAggregation() *ParentAggregation { return r } + +// true + +type ParentAggregationVariant interface { + ParentAggregationCaster() *ParentAggregation +} + +func (s *ParentAggregation) ParentAggregationCaster() *ParentAggregation { + return s +} diff --git a/typedapi/types/parentidquery.go b/typedapi/types/parentidquery.go index eaf3b9fc49..2568179164 100644 --- a/typedapi/types/parentidquery.go +++ b/typedapi/types/parentidquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ParentIdQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/joining.ts#L132-L146 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/joining.ts#L141-L158 type ParentIdQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -127,3 +127,13 @@ func NewParentIdQuery() *ParentIdQuery { return r } + +// true + +type ParentIdQueryVariant interface { + ParentIdQueryCaster() *ParentIdQuery +} + +func (s *ParentIdQuery) ParentIdQueryCaster() *ParentIdQuery { + return s +} diff --git a/typedapi/types/parenttaskinfo.go b/typedapi/types/parenttaskinfo.go index f12db04524..dfe826052e 100644 --- a/typedapi/types/parenttaskinfo.go +++ b/typedapi/types/parenttaskinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,12 +31,22 @@ import ( // ParentTaskInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/tasks/_types/TaskListResponseBase.ts#L45-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/tasks/_types/TaskListResponseBase.ts#L45-L47 type ParentTaskInfo struct { - Action string `json:"action"` - Cancellable bool `json:"cancellable"` - Cancelled *bool `json:"cancelled,omitempty"` - Children []TaskInfo `json:"children,omitempty"` + Action string `json:"action"` + Cancellable bool `json:"cancellable"` + Cancelled *bool `json:"cancelled,omitempty"` + Children []TaskInfo `json:"children,omitempty"` + // Description Human readable text that identifies the particular request that the task is + // performing. + // For example, it might identify the search request being performed by a search + // task. + // Other kinds of tasks have different descriptions, like `_reindex` which has + // the source and the destination, or `_bulk` which just has the number of + // requests and the destination indices. 
+ // Many requests will have only an empty description because more detailed + // information about the request is not easily available or particularly helpful + // in identifying the request. Description *string `json:"description,omitempty"` Headers map[string]string `json:"headers"` Id int64 `json:"id"` @@ -45,7 +55,13 @@ type ParentTaskInfo struct { RunningTime Duration `json:"running_time,omitempty"` RunningTimeInNanos int64 `json:"running_time_in_nanos"` StartTimeInMillis int64 `json:"start_time_in_millis"` - // Status Task status information can vary wildly from task to task. + // Status The internal status of the task, which varies from task to task. + // The format also varies. + // While the goal is to keep the status for a particular task consistent from + // version to version, this is not always possible because sometimes the + // implementation changes. + // Fields might be removed from the status for a particular request so any + // parsing you do of the status might break in minor releases. Status json.RawMessage `json:"status,omitempty"` Type string `json:"type"` } @@ -195,8 +211,10 @@ func (s *ParentTaskInfo) UnmarshalJSON(data []byte) error { // NewParentTaskInfo returns a ParentTaskInfo. func NewParentTaskInfo() *ParentTaskInfo { r := &ParentTaskInfo{ - Headers: make(map[string]string, 0), + Headers: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/passthroughinferenceoptions.go b/typedapi/types/passthroughinferenceoptions.go index 3f7f0a0141..c6c7493bb1 100644 --- a/typedapi/types/passthroughinferenceoptions.go +++ b/typedapi/types/passthroughinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PassThroughInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L224-L231 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L208-L215 type PassThroughInferenceOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. @@ -89,3 +89,13 @@ func NewPassThroughInferenceOptions() *PassThroughInferenceOptions { return r } + +// true + +type PassThroughInferenceOptionsVariant interface { + PassThroughInferenceOptionsCaster() *PassThroughInferenceOptions +} + +func (s *PassThroughInferenceOptions) PassThroughInferenceOptionsCaster() *PassThroughInferenceOptions { + return s +} diff --git a/typedapi/types/passthroughinferenceupdateoptions.go b/typedapi/types/passthroughinferenceupdateoptions.go index df16f6920e..000914c0ce 100644 --- a/typedapi/types/passthroughinferenceupdateoptions.go +++ b/typedapi/types/passthroughinferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PassThroughInferenceUpdateOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L385-L390 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L373-L378 type PassThroughInferenceUpdateOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. @@ -83,3 +83,13 @@ func NewPassThroughInferenceUpdateOptions() *PassThroughInferenceUpdateOptions { return r } + +// true + +type PassThroughInferenceUpdateOptionsVariant interface { + PassThroughInferenceUpdateOptionsCaster() *PassThroughInferenceUpdateOptions +} + +func (s *PassThroughInferenceUpdateOptions) PassThroughInferenceUpdateOptionsCaster() *PassThroughInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/passthroughobjectproperty.go b/typedapi/types/passthroughobjectproperty.go new file mode 100644 index 0000000000..9bf0e31b78 --- /dev/null +++ b/typedapi/types/passthroughobjectproperty.go @@ -0,0 +1,891 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" +) + +// PassthroughObjectProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/complex.ts#L52-L57 +type PassthroughObjectProperty struct { + CopyTo []string `json:"copy_to,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Priority *int `json:"priority,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *PassthroughObjectProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { 
+ return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = 
oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "priority": + + var 
tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + s.Priority = &value + case float64: + f := int(v) + s.Priority = &f + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := 
NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := 
dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PassthroughObjectProperty) MarshalJSON() ([]byte, error) { + type innerPassthroughObjectProperty PassthroughObjectProperty + tmp := innerPassthroughObjectProperty{ + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + Enabled: s.Enabled, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Priority: s.Priority, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + Type: s.Type, + } + + tmp.Type = "passthrough" + + return json.Marshal(tmp) +} + +// NewPassthroughObjectProperty returns a PassthroughObjectProperty. +func NewPassthroughObjectProperty() *PassthroughObjectProperty { + r := &PassthroughObjectProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +// true + +type PassthroughObjectPropertyVariant interface { + PassthroughObjectPropertyCaster() *PassthroughObjectProperty +} + +func (s *PassthroughObjectProperty) PassthroughObjectPropertyCaster() *PassthroughObjectProperty { + return s +} diff --git a/typedapi/types/pathhierarchytokenizer.go b/typedapi/types/pathhierarchytokenizer.go index 0d95b1b920..de3a92c775 100644 --- a/typedapi/types/pathhierarchytokenizer.go +++ b/typedapi/types/pathhierarchytokenizer.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PathHierarchyTokenizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L89-L96 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L95-L102 type PathHierarchyTokenizer struct { BufferSize Stringifiedinteger `json:"buffer_size,omitempty"` Delimiter *string `json:"delimiter,omitempty"` @@ -135,3 +135,13 @@ func NewPathHierarchyTokenizer() *PathHierarchyTokenizer { return r } + +// true + +type PathHierarchyTokenizerVariant interface { + PathHierarchyTokenizerCaster() *PathHierarchyTokenizer +} + +func (s *PathHierarchyTokenizer) PathHierarchyTokenizerCaster() *PathHierarchyTokenizer { + return s +} diff --git a/typedapi/types/patternanalyzer.go b/typedapi/types/patternanalyzer.go index 693de37a16..cb386854ef 100644 --- a/typedapi/types/patternanalyzer.go +++ b/typedapi/types/patternanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PatternAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/analyzers.ts#L74-L81 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L311-L318 type PatternAnalyzer struct { Flags *string `json:"flags,omitempty"` Lowercase *bool `json:"lowercase,omitempty"` @@ -148,3 +148,13 @@ func NewPatternAnalyzer() *PatternAnalyzer { return r } + +// true + +type PatternAnalyzerVariant interface { + PatternAnalyzerCaster() *PatternAnalyzer +} + +func (s *PatternAnalyzer) PatternAnalyzerCaster() *PatternAnalyzer { + return s +} diff --git a/typedapi/types/patterncapturetokenfilter.go b/typedapi/types/patterncapturetokenfilter.go index ba3f1c11b1..e98ae1551f 100644 --- a/typedapi/types/patterncapturetokenfilter.go +++ b/typedapi/types/patterncapturetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // PatternCaptureTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L280-L284 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L279-L283 type PatternCaptureTokenFilter struct { Patterns []string `json:"patterns"` PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` @@ -99,3 +99,13 @@ func NewPatternCaptureTokenFilter() *PatternCaptureTokenFilter { return r } + +// true + +type PatternCaptureTokenFilterVariant interface { + PatternCaptureTokenFilterCaster() *PatternCaptureTokenFilter +} + +func (s *PatternCaptureTokenFilter) PatternCaptureTokenFilterCaster() *PatternCaptureTokenFilter { + return s +} diff --git a/typedapi/types/patternreplacecharfilter.go b/typedapi/types/patternreplacecharfilter.go index fd34c7ffd3..58d045c761 100644 --- a/typedapi/types/patternreplacecharfilter.go +++ b/typedapi/types/patternreplacecharfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PatternReplaceCharFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/char_filters.ts#L54-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/char_filters.ts#L57-L62 type PatternReplaceCharFilter struct { Flags *string `json:"flags,omitempty"` Pattern string `json:"pattern"` @@ -128,3 +128,13 @@ func NewPatternReplaceCharFilter() *PatternReplaceCharFilter { return r } + +// true + +type PatternReplaceCharFilterVariant interface { + PatternReplaceCharFilterCaster() *PatternReplaceCharFilter +} + +func (s *PatternReplaceCharFilter) PatternReplaceCharFilterCaster() *PatternReplaceCharFilter { + return s +} diff --git a/typedapi/types/patternreplacetokenfilter.go b/typedapi/types/patternreplacetokenfilter.go index d264e781d6..f5dee06689 100644 --- a/typedapi/types/patternreplacetokenfilter.go +++ b/typedapi/types/patternreplacetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PatternReplaceTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L286-L292 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L285-L291 type PatternReplaceTokenFilter struct { All *bool `json:"all,omitempty"` Flags *string `json:"flags,omitempty"` @@ -144,3 +144,13 @@ func NewPatternReplaceTokenFilter() *PatternReplaceTokenFilter { return r } + +// true + +type PatternReplaceTokenFilterVariant interface { + PatternReplaceTokenFilterCaster() *PatternReplaceTokenFilter +} + +func (s *PatternReplaceTokenFilter) PatternReplaceTokenFilterCaster() *PatternReplaceTokenFilter { + return s +} diff --git a/typedapi/types/patterntokenizer.go b/typedapi/types/patterntokenizer.go index d09aad9995..24ee5aba22 100644 --- a/typedapi/types/patterntokenizer.go +++ b/typedapi/types/patterntokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PatternTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L98-L103 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L104-L109 type PatternTokenizer struct { Flags *string `json:"flags,omitempty"` Group *int `json:"group,omitempty"` @@ -132,3 +132,13 @@ func NewPatternTokenizer() *PatternTokenizer { return r } + +// true + +type PatternTokenizerVariant interface { + PatternTokenizerCaster() *PatternTokenizer +} + +func (s *PatternTokenizer) PatternTokenizerCaster() *PatternTokenizer { + return s +} diff --git a/typedapi/types/pendingtask.go b/typedapi/types/pendingtask.go index bbc8722e78..08468d8ab5 100644 --- a/typedapi/types/pendingtask.go +++ b/typedapi/types/pendingtask.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PendingTask type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/pending_tasks/types.ts#L23-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/pending_tasks/types.ts#L23-L47 type PendingTask struct { // Executing Indicates whether the pending tasks are currently executing or not. 
Executing bool `json:"executing"` @@ -142,3 +142,5 @@ func NewPendingTask() *PendingTask { return r } + +// false diff --git a/typedapi/types/pendingtasksrecord.go b/typedapi/types/pendingtasksrecord.go index 844b5ac175..a9a6973cf6 100644 --- a/typedapi/types/pendingtasksrecord.go +++ b/typedapi/types/pendingtasksrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PendingTasksRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/pending_tasks/types.ts#L20-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/pending_tasks/types.ts#L20-L41 type PendingTasksRecord struct { // InsertOrder The task insertion order. InsertOrder *string `json:"insertOrder,omitempty"` @@ -117,3 +117,5 @@ func NewPendingTasksRecord() *PendingTasksRecord { return r } + +// false diff --git a/typedapi/types/percentage.go b/typedapi/types/percentage.go index 8f293ebdef..b6d92eeddc 100644 --- a/typedapi/types/percentage.go +++ b/typedapi/types/percentage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // float32 // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Numeric.ts#L28-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Numeric.ts#L28-L28 type Percentage any + +type PercentageVariant interface { + PercentageCaster() *Percentage +} diff --git a/typedapi/types/percentagescoreheuristic.go b/typedapi/types/percentagescoreheuristic.go index 4ae374501e..ae1e007135 100644 --- a/typedapi/types/percentagescoreheuristic.go +++ b/typedapi/types/percentagescoreheuristic.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // PercentageScoreHeuristic type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L766-L766 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L811-L811 type PercentageScoreHeuristic struct { } @@ -32,3 +32,13 @@ func NewPercentageScoreHeuristic() *PercentageScoreHeuristic { return r } + +// true + +type PercentageScoreHeuristicVariant interface { + PercentageScoreHeuristicCaster() *PercentageScoreHeuristic +} + +func (s *PercentageScoreHeuristic) PercentageScoreHeuristicCaster() *PercentageScoreHeuristic { + return s +} diff --git a/typedapi/types/percentileranksaggregation.go b/typedapi/types/percentileranksaggregation.go index e4fad4e353..311da385b3 100644 --- a/typedapi/types/percentileranksaggregation.go +++ b/typedapi/types/percentileranksaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PercentileRanksAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L174-L193 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L180-L202 type PercentileRanksAggregation struct { // Field The field on which to run the aggregation. 
Field *string `json:"field,omitempty"` @@ -136,3 +136,13 @@ func NewPercentileRanksAggregation() *PercentileRanksAggregation { return r } + +// true + +type PercentileRanksAggregationVariant interface { + PercentileRanksAggregationCaster() *PercentileRanksAggregation +} + +func (s *PercentileRanksAggregation) PercentileRanksAggregationCaster() *PercentileRanksAggregation { + return s +} diff --git a/typedapi/types/percentiles.go b/typedapi/types/percentiles.go index fb3c7ea5b5..b0c1a5a2a1 100644 --- a/typedapi/types/percentiles.go +++ b/typedapi/types/percentiles.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // KeyedPercentiles // []ArrayPercentilesItem // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L150-L151 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L152-L153 type Percentiles any diff --git a/typedapi/types/percentilesaggregation.go b/typedapi/types/percentilesaggregation.go index 302ff1221d..647c8ede2c 100644 --- a/typedapi/types/percentilesaggregation.go +++ b/typedapi/types/percentilesaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PercentilesAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L195-L214 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L204-L223 type PercentilesAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -136,3 +136,13 @@ func NewPercentilesAggregation() *PercentilesAggregation { return r } + +// true + +type PercentilesAggregationVariant interface { + PercentilesAggregationCaster() *PercentilesAggregation +} + +func (s *PercentilesAggregation) PercentilesAggregationCaster() *PercentilesAggregation { + return s +} diff --git a/typedapi/types/percentilesbucketaggregate.go b/typedapi/types/percentilesbucketaggregate.go index ce7a0aa682..7b7d3628a1 100644 --- a/typedapi/types/percentilesbucketaggregate.go +++ b/typedapi/types/percentilesbucketaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // PercentilesBucketAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L178-L179 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L180-L181 type PercentilesBucketAggregate struct { Meta Metadata `json:"meta,omitempty"` Values Percentiles `json:"values"` @@ -64,7 +64,7 @@ func (s *PercentilesBucketAggregate) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) switch rawMsg[0] { case '{': - o := make(KeyedPercentiles, 0) + o := make(map[string]string, 0) if err := localDec.Decode(&o); err != nil { return fmt.Errorf("%s | %w", "Values", err) } @@ -88,3 +88,5 @@ func NewPercentilesBucketAggregate() *PercentilesBucketAggregate { return r } + +// false diff --git a/typedapi/types/percentilesbucketaggregation.go b/typedapi/types/percentilesbucketaggregation.go index fccd5faa78..4a53d3107e 100644 --- a/typedapi/types/percentilesbucketaggregation.go +++ b/typedapi/types/percentilesbucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // PercentilesBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L354-L359 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L389-L397 type PercentilesBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -100,3 +100,13 @@ func NewPercentilesBucketAggregation() *PercentilesBucketAggregation { return r } + +// true + +type PercentilesBucketAggregationVariant interface { + PercentilesBucketAggregationCaster() *PercentilesBucketAggregation +} + +func (s *PercentilesBucketAggregation) PercentilesBucketAggregationCaster() *PercentilesBucketAggregation { + return s +} diff --git a/typedapi/types/percolatequery.go b/typedapi/types/percolatequery.go index 9365c9acfd..475ba03308 100644 --- a/typedapi/types/percolatequery.go +++ b/typedapi/types/percolatequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PercolateQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L202-L239 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L205-L245 type PercolateQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -175,3 +175,13 @@ func NewPercolateQuery() *PercolateQuery { return r } + +// true + +type PercolateQueryVariant interface { + PercolateQueryCaster() *PercolateQuery +} + +func (s *PercolateQuery) PercolateQueryCaster() *PercolateQuery { + return s +} diff --git a/typedapi/types/percolatorproperty.go b/typedapi/types/percolatorproperty.go index e75bcb43d7..dae649bfb4 100644 --- a/typedapi/types/percolatorproperty.go +++ b/typedapi/types/percolatorproperty.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,19 +29,21 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // PercolatorProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L188-L190 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L192-L194 type PercolatorProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { @@ -83,301 +85,313 @@ func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err 
+ return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err 
!= nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -426,306 +440,323 @@ func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | 
%w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := 
NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := 
NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -740,12 +771,13 @@ func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { func (s PercolatorProperty) MarshalJSON() ([]byte, error) { type innerPercolatorProperty PercolatorProperty tmp := innerPercolatorProperty{ - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Type: s.Type, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "percolator" @@ -756,10 +788,20 @@ func (s PercolatorProperty) MarshalJSON() ([]byte, error) { // NewPercolatorProperty returns a PercolatorProperty. 
func NewPercolatorProperty() *PercolatorProperty { r := &PercolatorProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type PercolatorPropertyVariant interface { + PercolatorPropertyCaster() *PercolatorProperty +} + +func (s *PercolatorProperty) PercolatorPropertyCaster() *PercolatorProperty { + return s +} diff --git a/typedapi/types/perpartitioncategorization.go b/typedapi/types/perpartitioncategorization.go index a0f35c803c..f5c9d4c10a 100644 --- a/typedapi/types/perpartitioncategorization.go +++ b/typedapi/types/perpartitioncategorization.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PerPartitionCategorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Analysis.ts#L150-L159 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Analysis.ts#L150-L159 type PerPartitionCategorization struct { // Enabled To enable this setting, you must also set the `partition_field_name` property // to the same value in every detector that uses the keyword `mlcategory`. 
@@ -100,3 +100,13 @@ func NewPerPartitionCategorization() *PerPartitionCategorization { return r } + +// true + +type PerPartitionCategorizationVariant interface { + PerPartitionCategorizationCaster() *PerPartitionCategorization +} + +func (s *PerPartitionCategorization) PerPartitionCategorizationCaster() *PerPartitionCategorization { + return s +} diff --git a/typedapi/types/persiananalyzer.go b/typedapi/types/persiananalyzer.go new file mode 100644 index 0000000000..2b4693cee3 --- /dev/null +++ b/typedapi/types/persiananalyzer.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PersianAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L235-L239 +type PersianAnalyzer struct { + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *PersianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PersianAnalyzer) MarshalJSON() ([]byte, error) { + type innerPersianAnalyzer PersianAnalyzer + tmp := innerPersianAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "persian" + + return json.Marshal(tmp) +} + +// NewPersianAnalyzer returns a PersianAnalyzer. 
+func NewPersianAnalyzer() *PersianAnalyzer { + r := &PersianAnalyzer{} + + return r +} + +// true + +type PersianAnalyzerVariant interface { + PersianAnalyzerCaster() *PersianAnalyzer +} + +func (s *PersianAnalyzer) PersianAnalyzerCaster() *PersianAnalyzer { + return s +} diff --git a/typedapi/types/persistenttaskstatus.go b/typedapi/types/persistenttaskstatus.go index 5665ecec3d..cfe5cbc95b 100644 --- a/typedapi/types/persistenttaskstatus.go +++ b/typedapi/types/persistenttaskstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // PersistentTaskStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L56-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L56-L58 type PersistentTaskStatus struct { Status shutdownstatus.ShutdownStatus `json:"status"` } @@ -37,3 +37,5 @@ func NewPersistentTaskStatus() *PersistentTaskStatus { return r } + +// false diff --git a/typedapi/types/phase.go b/typedapi/types/phase.go index 87b5bd5b84..96eda502bd 100644 --- a/typedapi/types/phase.go +++ b/typedapi/types/phase.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,10 +30,10 @@ import ( // Phase type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Phase.ts#L26-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Phase.ts#L26-L29 type Phase struct { Actions *IlmActions `json:"actions,omitempty"` - MinAge *Duration `json:"min_age,omitempty"` + MinAge Duration `json:"min_age,omitempty"` } func (s *Phase) UnmarshalJSON(data []byte) error { @@ -72,3 +72,13 @@ func NewPhase() *Phase { return r } + +// true + +type PhaseVariant interface { + PhaseCaster() *Phase +} + +func (s *Phase) PhaseCaster() *Phase { + return s +} diff --git a/typedapi/types/phases.go b/typedapi/types/phases.go index 3007dedf1b..78f24ec42c 100644 --- a/typedapi/types/phases.go +++ b/typedapi/types/phases.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Phases type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Phase.ts#L34-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Phase.ts#L31-L37 type Phases struct { Cold *Phase `json:"cold,omitempty"` Delete *Phase `json:"delete,omitempty"` @@ -37,3 +37,13 @@ func NewPhases() *Phases { return r } + +// true + +type PhasesVariant interface { + PhasesCaster() *Phases +} + +func (s *Phases) PhasesCaster() *Phases { + return s +} diff --git a/typedapi/types/phonetictokenfilter.go b/typedapi/types/phonetictokenfilter.go index cf557fff9d..80536d8073 100644 --- a/typedapi/types/phonetictokenfilter.go +++ b/typedapi/types/phonetictokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -36,14 +36,14 @@ import ( // PhoneticTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/phonetic-plugin.ts#L64-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/phonetic-plugin.ts#L64-L72 type PhoneticTokenFilter struct { Encoder phoneticencoder.PhoneticEncoder `json:"encoder"` - Languageset []phoneticlanguage.PhoneticLanguage `json:"languageset"` + Languageset []phoneticlanguage.PhoneticLanguage `json:"languageset,omitempty"` MaxCodeLen *int `json:"max_code_len,omitempty"` - NameType phoneticnametype.PhoneticNameType `json:"name_type"` + NameType *phoneticnametype.PhoneticNameType `json:"name_type,omitempty"` Replace *bool `json:"replace,omitempty"` - RuleType phoneticruletype.PhoneticRuleType `json:"rule_type"` + RuleType *phoneticruletype.PhoneticRuleType `json:"rule_type,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } @@ -164,3 +164,13 @@ func NewPhoneticTokenFilter() *PhoneticTokenFilter { return r } + +// true + +type PhoneticTokenFilterVariant interface { + PhoneticTokenFilterCaster() *PhoneticTokenFilter +} + +func (s *PhoneticTokenFilter) PhoneticTokenFilterCaster() *PhoneticTokenFilter { + return s +} diff --git a/typedapi/types/phrasesuggest.go b/typedapi/types/phrasesuggest.go index 0ebf86d230..ab86497660 100644 --- a/typedapi/types/phrasesuggest.go +++ b/typedapi/types/phrasesuggest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PhraseSuggest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L57-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L57-L62 type PhraseSuggest struct { Length int `json:"length"` Offset int `json:"offset"` @@ -125,3 +125,5 @@ func NewPhraseSuggest() *PhraseSuggest { return r } + +// false diff --git a/typedapi/types/phrasesuggestcollate.go b/typedapi/types/phrasesuggestcollate.go index ef299a5778..5940698dfc 100644 --- a/typedapi/types/phrasesuggestcollate.go +++ b/typedapi/types/phrasesuggestcollate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PhraseSuggestCollate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L333-L346 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L333-L346 type PhraseSuggestCollate struct { // Params Parameters to use if the query is templated. Params map[string]json.RawMessage `json:"params,omitempty"` @@ -92,8 +92,18 @@ func (s *PhraseSuggestCollate) UnmarshalJSON(data []byte) error { // NewPhraseSuggestCollate returns a PhraseSuggestCollate. 
func NewPhraseSuggestCollate() *PhraseSuggestCollate { r := &PhraseSuggestCollate{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type PhraseSuggestCollateVariant interface { + PhraseSuggestCollateCaster() *PhraseSuggestCollate +} + +func (s *PhraseSuggestCollate) PhraseSuggestCollateCaster() *PhraseSuggestCollate { + return s +} diff --git a/typedapi/types/phrasesuggestcollatequery.go b/typedapi/types/phrasesuggestcollatequery.go index e29832c8e3..b0d0b1d953 100644 --- a/typedapi/types/phrasesuggestcollatequery.go +++ b/typedapi/types/phrasesuggestcollatequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PhraseSuggestCollateQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L348-L357 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L348-L357 type PhraseSuggestCollateQuery struct { // Id The search template ID. 
Id *string `json:"id,omitempty"` @@ -82,3 +82,13 @@ func NewPhraseSuggestCollateQuery() *PhraseSuggestCollateQuery { return r } + +// true + +type PhraseSuggestCollateQueryVariant interface { + PhraseSuggestCollateQueryCaster() *PhraseSuggestCollateQuery +} + +func (s *PhraseSuggestCollateQuery) PhraseSuggestCollateQueryCaster() *PhraseSuggestCollateQuery { + return s +} diff --git a/typedapi/types/phrasesuggester.go b/typedapi/types/phrasesuggester.go index 9f7b96afbb..eadfaa3474 100644 --- a/typedapi/types/phrasesuggester.go +++ b/typedapi/types/phrasesuggester.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PhraseSuggester type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L359-L417 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L359-L417 type PhraseSuggester struct { // Analyzer The analyzer to analyze the suggest text with. // Defaults to the search analyzer of the suggest field. @@ -299,3 +299,13 @@ func NewPhraseSuggester() *PhraseSuggester { return r } + +// true + +type PhraseSuggesterVariant interface { + PhraseSuggesterCaster() *PhraseSuggester +} + +func (s *PhraseSuggester) PhraseSuggesterCaster() *PhraseSuggester { + return s +} diff --git a/typedapi/types/phrasesuggesthighlight.go b/typedapi/types/phrasesuggesthighlight.go index fd6c6273fe..feef41e81e 100644 --- a/typedapi/types/phrasesuggesthighlight.go +++ b/typedapi/types/phrasesuggesthighlight.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PhraseSuggestHighlight type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L419-L428 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L419-L428 type PhraseSuggestHighlight struct { // PostTag Use in conjunction with `pre_tag` to define the HTML tags to use for the // highlighted text. @@ -91,3 +91,13 @@ func NewPhraseSuggestHighlight() *PhraseSuggestHighlight { return r } + +// true + +type PhraseSuggestHighlightVariant interface { + PhraseSuggestHighlightCaster() *PhraseSuggestHighlight +} + +func (s *PhraseSuggestHighlight) PhraseSuggestHighlightCaster() *PhraseSuggestHighlight { + return s +} diff --git a/typedapi/types/phrasesuggestoption.go b/typedapi/types/phrasesuggestoption.go index f4957a5424..22dee8d040 100644 --- a/typedapi/types/phrasesuggestoption.go +++ b/typedapi/types/phrasesuggestoption.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PhraseSuggestOption type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L86-L91 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L86-L91 type PhraseSuggestOption struct { CollateMatch *bool `json:"collate_match,omitempty"` Highlighted *string `json:"highlighted,omitempty"` @@ -119,3 +119,5 @@ func NewPhraseSuggestOption() *PhraseSuggestOption { return r } + +// false diff --git a/typedapi/types/pinneddoc.go b/typedapi/types/pinneddoc.go index 181286d458..d19beedc9b 100644 --- a/typedapi/types/pinneddoc.go +++ b/typedapi/types/pinneddoc.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // PinnedDoc type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L262-L271 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L269-L278 type PinnedDoc struct { // Id_ The unique document ID. Id_ string `json:"_id"` @@ -74,3 +74,13 @@ func NewPinnedDoc() *PinnedDoc { return r } + +// true + +type PinnedDocVariant interface { + PinnedDocCaster() *PinnedDoc +} + +func (s *PinnedDoc) PinnedDocCaster() *PinnedDoc { + return s +} diff --git a/typedapi/types/pinnedquery.go b/typedapi/types/pinnedquery.go index 9df895bc18..c41397a433 100644 --- a/typedapi/types/pinnedquery.go +++ b/typedapi/types/pinnedquery.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,8 +31,9 @@ import ( // PinnedQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L241-L260 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L247-L267 type PinnedQuery struct { + AdditionalPinnedQueryProperty map[string]json.RawMessage `json:"-"` // Boost Floating point number used to decrease or increase the relevance scores of // the query. // Boost values are relative to the default value of 1.0. @@ -47,7 +48,7 @@ type PinnedQuery struct { Ids []string `json:"ids,omitempty"` // Organic Any choice of query used to rank documents which will be ranked below the // "pinned" documents. 
- Organic *Query `json:"organic,omitempty"` + Organic Query `json:"organic"` QueryName_ *string `json:"_name,omitempty"` } @@ -109,14 +110,68 @@ func (s *PinnedQuery) UnmarshalJSON(data []byte) error { } s.QueryName_ = &o + default: + + if key, ok := t.(string); ok { + if s.AdditionalPinnedQueryProperty == nil { + s.AdditionalPinnedQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalPinnedQueryProperty", err) + } + s.AdditionalPinnedQueryProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s PinnedQuery) MarshalJSON() ([]byte, error) { + type opt PinnedQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalPinnedQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalPinnedQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewPinnedQuery returns a PinnedQuery. func NewPinnedQuery() *PinnedQuery { - r := &PinnedQuery{} + r := &PinnedQuery{ + AdditionalPinnedQueryProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type PinnedQueryVariant interface { + PinnedQueryCaster() *PinnedQuery +} + +func (s *PinnedQuery) PinnedQueryCaster() *PinnedQuery { + return s +} diff --git a/typedapi/types/pipelineconfig.go b/typedapi/types/pipelineconfig.go index ad14eebc64..3aa5554655 100644 --- a/typedapi/types/pipelineconfig.go +++ b/typedapi/types/pipelineconfig.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PipelineConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Pipeline.ts#L61-L75 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Pipeline.ts#L67-L81 type PipelineConfig struct { // Description Description of the ingest pipeline. Description *string `json:"description,omitempty"` @@ -90,3 +90,5 @@ func NewPipelineConfig() *PipelineConfig { return r } + +// false diff --git a/typedapi/types/pipelinemetadata.go b/typedapi/types/pipelinemetadata.go index b4aefd840a..8b5b32e789 100644 --- a/typedapi/types/pipelinemetadata.go +++ b/typedapi/types/pipelinemetadata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PipelineMetadata type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/logstash/_types/Pipeline.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/logstash/_types/Pipeline.ts#L23-L26 type PipelineMetadata struct { Type string `json:"type"` Version string `json:"version"` @@ -87,3 +87,13 @@ func NewPipelineMetadata() *PipelineMetadata { return r } + +// true + +type PipelineMetadataVariant interface { + PipelineMetadataCaster() *PipelineMetadata +} + +func (s *PipelineMetadata) PipelineMetadataCaster() *PipelineMetadata { + return s +} diff --git a/typedapi/types/pipelineprocessor.go b/typedapi/types/pipelineprocessor.go index 2415a4efbe..2c9b9fdd0e 100644 --- a/typedapi/types/pipelineprocessor.go +++ b/typedapi/types/pipelineprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PipelineProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L928-L939 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1266-L1277 type PipelineProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. 
@@ -152,3 +152,13 @@ func NewPipelineProcessor() *PipelineProcessor { return r } + +// true + +type PipelineProcessorVariant interface { + PipelineProcessorCaster() *PipelineProcessor +} + +func (s *PipelineProcessor) PipelineProcessorCaster() *PipelineProcessor { + return s +} diff --git a/typedapi/types/pipelinesettings.go b/typedapi/types/pipelinesettings.go index 74f4cc063f..e5dd9150bf 100644 --- a/typedapi/types/pipelinesettings.go +++ b/typedapi/types/pipelinesettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PipelineSettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/logstash/_types/Pipeline.ts#L28-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/logstash/_types/Pipeline.ts#L28-L59 type PipelineSettings struct { // PipelineBatchDelay When creating pipeline event batches, how long in milliseconds to wait for // each event before dispatching an undersized batch to pipeline workers. @@ -184,3 +184,13 @@ func NewPipelineSettings() *PipelineSettings { return r } + +// true + +type PipelineSettingsVariant interface { + PipelineSettingsCaster() *PipelineSettings +} + +func (s *PipelineSettings) PipelineSettingsCaster() *PipelineSettings { + return s +} diff --git a/typedapi/types/pipelinesimulation.go b/typedapi/types/pipelinesimulation.go index 60c217d253..09eb65c208 100644 --- a/typedapi/types/pipelinesimulation.go +++ b/typedapi/types/pipelinesimulation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,13 +33,15 @@ import ( // PipelineSimulation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/simulate/types.ts#L33-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Simulation.ts#L52-L60 type PipelineSimulation struct { - Doc *DocumentSimulation `json:"doc,omitempty"` - ProcessorResults []PipelineSimulation `json:"processor_results,omitempty"` - ProcessorType *string `json:"processor_type,omitempty"` - Status *actionstatusoptions.ActionStatusOptions `json:"status,omitempty"` - Tag *string `json:"tag,omitempty"` + Description *string `json:"description,omitempty"` + Doc *DocumentSimulation `json:"doc,omitempty"` + Error *ErrorCause `json:"error,omitempty"` + IgnoredError *ErrorCause `json:"ignored_error,omitempty"` + ProcessorType *string `json:"processor_type,omitempty"` + Status *actionstatusoptions.ActionStatusOptions `json:"status,omitempty"` + Tag *string `json:"tag,omitempty"` } func (s *PipelineSimulation) UnmarshalJSON(data []byte) error { @@ -57,14 +59,31 @@ func (s *PipelineSimulation) UnmarshalJSON(data []byte) error { switch t { + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + case "doc": if err := dec.Decode(&s.Doc); err != nil { return fmt.Errorf("%s | %w", "Doc", err) } - case "processor_results": - if err := dec.Decode(&s.ProcessorResults); err != nil { - return fmt.Errorf("%s | %w", "ProcessorResults", err) + case "error": + if err := 
dec.Decode(&s.Error); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + + case "ignored_error": + if err := dec.Decode(&s.IgnoredError); err != nil { + return fmt.Errorf("%s | %w", "IgnoredError", err) } case "processor_type": @@ -107,3 +126,5 @@ func NewPipelineSimulation() *PipelineSimulation { return r } + +// false diff --git a/typedapi/types/pipeseparatedflagssimplequerystringflag.go b/typedapi/types/pipeseparatedflagssimplequerystringflag.go index 3e0596c549..bc77a8a700 100644 --- a/typedapi/types/pipeseparatedflagssimplequerystringflag.go +++ b/typedapi/types/pipeseparatedflagssimplequerystringflag.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // simplequerystringflag.SimpleQueryStringFlag // string // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_spec_utils/PipeSeparatedFlags.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_spec_utils/PipeSeparatedFlags.ts#L20-L27 type PipeSeparatedFlagsSimpleQueryStringFlag any + +type PipeSeparatedFlagsSimpleQueryStringFlagVariant interface { + PipeSeparatedFlagsSimpleQueryStringFlagCaster() *PipeSeparatedFlagsSimpleQueryStringFlag +} diff --git a/typedapi/types/pivot.go b/typedapi/types/pivot.go index 35388b9781..b5d84ee4c4 100644 --- a/typedapi/types/pivot.go +++ b/typedapi/types/pivot.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Pivot type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/_types/Transform.ts#L54-L68 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/_types/Transform.ts#L54-L68 type Pivot struct { // Aggregations Defines how to aggregate the grouped data. The following aggregations are // currently supported: average, bucket @@ -41,9 +41,19 @@ type Pivot struct { // NewPivot returns a Pivot. func NewPivot() *Pivot { r := &Pivot{ - Aggregations: make(map[string]Aggregations, 0), - GroupBy: make(map[string]PivotGroupByContainer, 0), + Aggregations: make(map[string]Aggregations), + GroupBy: make(map[string]PivotGroupByContainer), } return r } + +// true + +type PivotVariant interface { + PivotCaster() *Pivot +} + +func (s *Pivot) PivotCaster() *Pivot { + return s +} diff --git a/typedapi/types/pivotgroupbycontainer.go b/typedapi/types/pivotgroupbycontainer.go index 24f3333c53..d5baa4af64 100644 --- a/typedapi/types/pivotgroupbycontainer.go +++ b/typedapi/types/pivotgroupbycontainer.go @@ -16,23 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // PivotGroupByContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/_types/Transform.ts#L70-L78 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/_types/Transform.ts#L70-L78 type PivotGroupByContainer struct { - DateHistogram *DateHistogramAggregation `json:"date_histogram,omitempty"` - GeotileGrid *GeoTileGridAggregation `json:"geotile_grid,omitempty"` - Histogram *HistogramAggregation `json:"histogram,omitempty"` - Terms *TermsAggregation `json:"terms,omitempty"` + AdditionalPivotGroupByContainerProperty map[string]json.RawMessage `json:"-"` + DateHistogram *DateHistogramAggregation `json:"date_histogram,omitempty"` + GeotileGrid *GeoTileGridAggregation `json:"geotile_grid,omitempty"` + Histogram *HistogramAggregation `json:"histogram,omitempty"` + Terms *TermsAggregation `json:"terms,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s PivotGroupByContainer) MarshalJSON() ([]byte, error) { + type opt PivotGroupByContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalPivotGroupByContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalPivotGroupByContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewPivotGroupByContainer returns a PivotGroupByContainer. 
func NewPivotGroupByContainer() *PivotGroupByContainer { - r := &PivotGroupByContainer{} + r := &PivotGroupByContainer{ + AdditionalPivotGroupByContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type PivotGroupByContainerVariant interface { + PivotGroupByContainerCaster() *PivotGroupByContainer +} + +func (s *PivotGroupByContainer) PivotGroupByContainerCaster() *PivotGroupByContainer { + return s +} diff --git a/typedapi/types/pluginsrecord.go b/typedapi/types/pluginsrecord.go index f1288e015a..7dce985341 100644 --- a/typedapi/types/pluginsrecord.go +++ b/typedapi/types/pluginsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PluginsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/plugins/types.ts#L22-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/plugins/types.ts#L22-L52 type PluginsRecord struct { // Component The component name. Component *string `json:"component,omitempty"` @@ -124,3 +124,5 @@ func NewPluginsRecord() *PluginsRecord { return r } + +// false diff --git a/typedapi/types/pluginsstatus.go b/typedapi/types/pluginsstatus.go index 0e20085311..02466087ab 100644 --- a/typedapi/types/pluginsstatus.go +++ b/typedapi/types/pluginsstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // PluginsStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L60-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L60-L62 type PluginsStatus struct { Status shutdownstatus.ShutdownStatus `json:"status"` } @@ -37,3 +37,5 @@ func NewPluginsStatus() *PluginsStatus { return r } + +// false diff --git a/typedapi/types/pluginstats.go b/typedapi/types/pluginstats.go index 901815bbb0..ef9382c772 100644 --- a/typedapi/types/pluginstats.go +++ b/typedapi/types/pluginstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PluginStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L180-L190 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L183-L193 type PluginStats struct { Classname string `json:"classname"` Description string `json:"description"` @@ -147,3 +147,5 @@ func NewPluginStats() *PluginStats { return r } + +// false diff --git a/typedapi/types/pointintimereference.go b/typedapi/types/pointintimereference.go index 5419c1eb6e..a980a1a904 100644 --- a/typedapi/types/pointintimereference.go +++ b/typedapi/types/pointintimereference.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // PointInTimeReference type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/PointInTimeReference.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/PointInTimeReference.ts#L23-L26 type PointInTimeReference struct { Id string `json:"id"` KeepAlive Duration `json:"keep_alive,omitempty"` @@ -72,3 +72,13 @@ func NewPointInTimeReference() *PointInTimeReference { return r } + +// true + +type PointInTimeReferenceVariant interface { + PointInTimeReferenceCaster() *PointInTimeReference +} + +func (s *PointInTimeReference) PointInTimeReferenceCaster() *PointInTimeReference { + return s +} diff --git a/typedapi/types/pointproperty.go b/typedapi/types/pointproperty.go index 5078453f88..e7c6e020fb 100644 --- a/typedapi/types/pointproperty.go +++ b/typedapi/types/pointproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // PointProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/geo.ts#L66-L71 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/geo.ts#L66-L71 type PointProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -43,12 +44,12 @@ type PointProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` IgnoreZValue *bool `json:"ignore_z_value,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *string `json:"null_value,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *string `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *PointProperty) UnmarshalJSON(data []byte) error { @@ -120,301 +121,313 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil 
{ - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -503,318 +516,318 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if 
err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -829,6 +842,11 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -843,19 
+861,19 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { func (s PointProperty) MarshalJSON() ([]byte, error) { type innerPointProperty PointProperty tmp := innerPointProperty{ - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - IgnoreZValue: s.IgnoreZValue, - Meta: s.Meta, - NullValue: s.NullValue, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "point" @@ -866,10 +884,20 @@ func (s PointProperty) MarshalJSON() ([]byte, error) { // NewPointProperty returns a PointProperty. func NewPointProperty() *PointProperty { r := &PointProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type PointPropertyVariant interface { + PointPropertyCaster() *PointProperty +} + +func (s *PointProperty) PointPropertyCaster() *PointProperty { + return s +} diff --git a/typedapi/types/pool.go b/typedapi/types/pool.go index a810df6f7d..2a5e2ad206 100644 --- a/typedapi/types/pool.go +++ b/typedapi/types/pool.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Pool type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L878-L895 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L949-L966 type Pool struct { // MaxInBytes Maximum amount of memory, in bytes, available for use by the heap. MaxInBytes *int64 `json:"max_in_bytes,omitempty"` @@ -129,3 +129,5 @@ func NewPool() *Pool { return r } + +// false diff --git a/typedapi/types/porterstemtokenfilter.go b/typedapi/types/porterstemtokenfilter.go index 3d6602f289..2c382eb56f 100644 --- a/typedapi/types/porterstemtokenfilter.go +++ b/typedapi/types/porterstemtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // PorterStemTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L294-L296 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L293-L295 type PorterStemTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewPorterStemTokenFilter() *PorterStemTokenFilter { return r } + +// true + +type PorterStemTokenFilterVariant interface { + PorterStemTokenFilterCaster() *PorterStemTokenFilter +} + +func (s *PorterStemTokenFilter) PorterStemTokenFilterCaster() *PorterStemTokenFilter { + return s +} diff --git a/typedapi/types/portugueseanalyzer.go b/typedapi/types/portugueseanalyzer.go new file mode 100644 index 0000000000..32118aa9a5 --- /dev/null +++ b/typedapi/types/portugueseanalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PortugueseAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L241-L246 +type PortugueseAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *PortugueseAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PortugueseAnalyzer) 
MarshalJSON() ([]byte, error) { + type innerPortugueseAnalyzer PortugueseAnalyzer + tmp := innerPortugueseAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "portuguese" + + return json.Marshal(tmp) +} + +// NewPortugueseAnalyzer returns a PortugueseAnalyzer. +func NewPortugueseAnalyzer() *PortugueseAnalyzer { + r := &PortugueseAnalyzer{} + + return r +} + +// true + +type PortugueseAnalyzerVariant interface { + PortugueseAnalyzerCaster() *PortugueseAnalyzer +} + +func (s *PortugueseAnalyzer) PortugueseAnalyzerCaster() *PortugueseAnalyzer { + return s +} diff --git a/typedapi/types/postmigrationfeature.go b/typedapi/types/postmigrationfeature.go index 3a1e3819c0..5953fcd565 100644 --- a/typedapi/types/postmigrationfeature.go +++ b/typedapi/types/postmigrationfeature.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PostMigrationFeature type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L27-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L27-L29 type PostMigrationFeature struct { FeatureName string `json:"feature_name"` } @@ -74,3 +74,5 @@ func NewPostMigrationFeature() *PostMigrationFeature { return r } + +// false diff --git a/typedapi/types/predicatetokenfilter.go b/typedapi/types/predicatetokenfilter.go index 3433111dea..ac2fe5927d 100644 --- a/typedapi/types/predicatetokenfilter.go +++ b/typedapi/types/predicatetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // PredicateTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L298-L301 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L297-L300 type PredicateTokenFilter struct { Script Script `json:"script"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewPredicateTokenFilter() *PredicateTokenFilter { return r } + +// true + +type PredicateTokenFilterVariant interface { + PredicateTokenFilterCaster() *PredicateTokenFilter +} + +func (s *PredicateTokenFilter) PredicateTokenFilterCaster() *PredicateTokenFilter { + return s +} diff --git a/typedapi/types/predictedvalue.go b/typedapi/types/predictedvalue.go index dd4d0e5f68..b8f84bcd2e 100644 --- a/typedapi/types/predictedvalue.go +++ b/typedapi/types/predictedvalue.go @@ -16,16 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types -// PredictedValue holds the union for the following types: +// PredictedValue type alias. // -// string -// Float64 -// bool -// int -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L457-L457 -type PredictedValue any +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L445-L445 +type PredictedValue []ScalarValue diff --git a/typedapi/types/prefixquery.go b/typedapi/types/prefixquery.go index d1dad92cb8..ed92ba8276 100644 --- a/typedapi/types/prefixquery.go +++ b/typedapi/types/prefixquery.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PrefixQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L88-L107 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L98-L120 type PrefixQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -148,3 +148,13 @@ func NewPrefixQuery() *PrefixQuery { return r } + +// true + +type PrefixQueryVariant interface { + PrefixQueryCaster() *PrefixQuery +} + +func (s *PrefixQuery) PrefixQueryCaster() *PrefixQuery { + return s +} diff --git a/typedapi/types/preprocessor.go b/typedapi/types/preprocessor.go index 72787544a7..57f17c0b43 100644 --- a/typedapi/types/preprocessor.go +++ b/typedapi/types/preprocessor.go @@ -16,22 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // Preprocessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/types.ts#L31-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/types.ts#L31-L36 type Preprocessor struct { - FrequencyEncoding *FrequencyEncodingPreprocessor `json:"frequency_encoding,omitempty"` - OneHotEncoding *OneHotEncodingPreprocessor `json:"one_hot_encoding,omitempty"` - TargetMeanEncoding *TargetMeanEncodingPreprocessor `json:"target_mean_encoding,omitempty"` + AdditionalPreprocessorProperty map[string]json.RawMessage `json:"-"` + FrequencyEncoding *FrequencyEncodingPreprocessor `json:"frequency_encoding,omitempty"` + OneHotEncoding *OneHotEncodingPreprocessor `json:"one_hot_encoding,omitempty"` + TargetMeanEncoding *TargetMeanEncodingPreprocessor `json:"target_mean_encoding,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s Preprocessor) MarshalJSON() ([]byte, error) { + type opt Preprocessor + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalPreprocessorProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalPreprocessorProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewPreprocessor returns a Preprocessor. 
func NewPreprocessor() *Preprocessor { - r := &Preprocessor{} + r := &Preprocessor{ + AdditionalPreprocessorProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type PreprocessorVariant interface { + PreprocessorCaster() *Preprocessor +} + +func (s *Preprocessor) PreprocessorCaster() *Preprocessor { + return s +} diff --git a/typedapi/types/pressurememory.go b/typedapi/types/pressurememory.go index 53045fe685..b234da04e4 100644 --- a/typedapi/types/pressurememory.go +++ b/typedapi/types/pressurememory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PressureMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L144-L199 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L144-L199 type PressureMemory struct { // All Memory consumed by indexing requests in the coordinating, primary, or replica // stage. @@ -239,3 +239,5 @@ func NewPressureMemory() *PressureMemory { return r } + +// false diff --git a/typedapi/types/privileges.go b/typedapi/types/privileges.go index d06af8485a..215983c681 100644 --- a/typedapi/types/privileges.go +++ b/typedapi/types/privileges.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Privileges type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/has_privileges/types.ts#L48-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/has_privileges/types.ts#L49-L49 type Privileges map[string]bool diff --git a/typedapi/types/privilegesactions.go b/typedapi/types/privilegesactions.go index 26649ee8a6..6482813b8e 100644 --- a/typedapi/types/privilegesactions.go +++ b/typedapi/types/privilegesactions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PrivilegesActions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/put_privileges/types.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/put_privileges/types.ts#L22-L27 type PrivilegesActions struct { Actions []string `json:"actions"` Application *string `json:"application,omitempty"` @@ -92,3 +92,13 @@ func NewPrivilegesActions() *PrivilegesActions { return r } + +// true + +type PrivilegesActionsVariant interface { + PrivilegesActionsCaster() *PrivilegesActions +} + +func (s *PrivilegesActions) PrivilegesActionsCaster() *PrivilegesActions { + return s +} diff --git a/typedapi/types/privilegescheck.go b/typedapi/types/privilegescheck.go index cb27e8d81a..f2a3570f00 100644 --- a/typedapi/types/privilegescheck.go +++ b/typedapi/types/privilegescheck.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // PrivilegesCheck type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/has_privileges_user_profile/types.ts#L30-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/has_privileges_user_profile/types.ts#L30-L37 type PrivilegesCheck struct { Application []ApplicationPrivilegesCheck `json:"application,omitempty"` // Cluster A list of the cluster privileges that you want to check. @@ -40,3 +40,13 @@ func NewPrivilegesCheck() *PrivilegesCheck { return r } + +// true + +type PrivilegesCheckVariant interface { + PrivilegesCheckCaster() *PrivilegesCheck +} + +func (s *PrivilegesCheck) PrivilegesCheckCaster() *PrivilegesCheck { + return s +} diff --git a/typedapi/types/process.go b/typedapi/types/process.go index 108c28a001..4f3362af63 100644 --- a/typedapi/types/process.go +++ b/typedapi/types/process.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Process type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L953-L975 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L1024-L1046 type Process struct { // Cpu Contains CPU statistics for the node. 
Cpu *Cpu `json:"cpu,omitempty"` @@ -131,3 +131,5 @@ func NewProcess() *Process { return r } + +// false diff --git a/typedapi/types/processor.go b/typedapi/types/processor.go index 61dbe809d2..17bc82495d 100644 --- a/typedapi/types/processor.go +++ b/typedapi/types/processor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Processor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L384-L401 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L420-L437 type Processor struct { // Count Number of documents transformed by the processor. Count *int64 `json:"count,omitempty"` @@ -119,3 +119,5 @@ func NewProcessor() *Processor { return r } + +// false diff --git a/typedapi/types/processorcontainer.go b/typedapi/types/processorcontainer.go index 4e989e625c..d729cdbb4b 100644 --- a/typedapi/types/processorcontainer.go +++ b/typedapi/types/processorcontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // ProcessorContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L27-L239 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L27-L301 type ProcessorContainer struct { + AdditionalProcessorContainerProperty map[string]json.RawMessage `json:"-"` // Append Appends one or more values to an existing array if the field already exists // and it is an array. // Converts a scalar to an array and appends one or more values to it if the @@ -47,6 +53,10 @@ type ProcessorContainer struct { // Circle Converts circle definitions of shapes to regular polygons which approximate // them. Circle *CircleProcessor `json:"circle,omitempty"` + // CommunityId Computes the Community ID for network flow data as defined in the + // Community ID Specification. You can use a community ID to correlate network + // events related to a single flow. + CommunityId *CommunityIDProcessor `json:"community_id,omitempty"` // Convert Converts a field in the currently ingested document to a different type, such // as converting a string to an integer. // If the field value is an array, all members will be converted. @@ -79,8 +89,16 @@ type ProcessorContainer struct { // This is useful for when you expect a pipeline to fail and want to relay a // specific message to the requester. Fail *FailProcessor `json:"fail,omitempty"` + // Fingerprint Computes a hash of the document’s content. You can use this hash for + // content fingerprinting. + Fingerprint *FingerprintProcessor `json:"fingerprint,omitempty"` // Foreach Runs an ingest processor on each element of an array or object. Foreach *ForeachProcessor `json:"foreach,omitempty"` + // GeoGrid Converts geo-grid definitions of grid tiles or cells to regular bounding + // boxes or polygons which describe their shape. 
+ // This is useful if there is a need to interact with the tile shapes as + // spatially indexable fields. + GeoGrid *GeoGridProcessor `json:"geo_grid,omitempty"` // Geoip The `geoip` processor adds information about the geographical location of an // IPv4 or IPv6 address. Geoip *GeoIpProcessor `json:"geoip,omitempty"` @@ -96,10 +114,16 @@ type ProcessorContainer struct { // If any non-string values are encountered, the processor will throw an // exception. Gsub *GsubProcessor `json:"gsub,omitempty"` + // HtmlStrip Removes HTML tags from the field. + // If the field is an array of strings, HTML tags will be removed from all + // members of the array. + HtmlStrip *HtmlStripProcessor `json:"html_strip,omitempty"` // Inference Uses a pre-trained data frame analytics model or a model deployed for natural // language processing tasks to infer against the data that is being ingested in // the pipeline. Inference *InferenceProcessor `json:"inference,omitempty"` + // IpLocation Currently an undocumented alias for GeoIP Processor. + IpLocation *IpLocationProcessor `json:"ip_location,omitempty"` // Join Joins each element of an array into a single string using a separator // character between each element. // Throws an error when the field is not an array. @@ -113,8 +137,24 @@ type ProcessorContainer struct { // If the field is an array of strings, all members of the array will be // converted. Lowercase *LowercaseProcessor `json:"lowercase,omitempty"` + // NetworkDirection Calculates the network direction given a source IP address, destination IP + // address, and a list of internal networks. + NetworkDirection *NetworkDirectionProcessor `json:"network_direction,omitempty"` // Pipeline Executes another pipeline. Pipeline *PipelineProcessor `json:"pipeline,omitempty"` + // Redact The Redact processor uses the Grok rules engine to obscure text in the input + // document matching the given Grok patterns. 
+ // The processor can be used to obscure Personal Identifying Information (PII) + // by configuring it to detect known patterns such as email or IP addresses. + // Text that matches a Grok pattern is replaced with a configurable string such + // as `<EMAIL>` where an email address is matched or simply replace all matches + // with the text `<REDACTED>` if preferred. + Redact *RedactProcessor `json:"redact,omitempty"` + // RegisteredDomain Extracts the registered domain (also known as the effective top-level + // domain or eTLD), sub-domain, and top-level domain from a fully qualified + // domain name (FQDN). Uses the registered domains defined in the Mozilla + // Public Suffix List. + RegisteredDomain *RegisteredDomainProcessor `json:"registered_domain,omitempty"` // Remove Removes existing fields. // If one field doesn’t exist, an exception will be thrown. Remove *RemoveProcessor `json:"remove,omitempty"` @@ -149,6 +189,10 @@ type ProcessorContainer struct { // Split Splits a field into an array using a separator character. // Only works on string fields. Split *SplitProcessor `json:"split,omitempty"` + // Terminate Terminates the current ingest pipeline, causing no further processors to be + // run. + // This will normally be executed conditionally, using the `if` option. + Terminate *TerminateProcessor `json:"terminate,omitempty"` // Trim Trims whitespace from a field. // If the field is an array of strings, all members of the array will be // trimmed. @@ -158,6 +202,11 @@ type ProcessorContainer struct { // If the field is an array of strings, all members of the array will be // converted. Uppercase *UppercaseProcessor `json:"uppercase,omitempty"` + // UriParts Parses a Uniform Resource Identifier (URI) string and extracts its components + // as an object. + // This URI object includes properties for the URI’s domain, path, fragment, + // port, query, scheme, user info, username, and password. 
+ UriParts *UriPartsProcessor `json:"uri_parts,omitempty"` // Urldecode URL-decodes a string. // If the field is an array of strings, all members of the array will be // decoded. @@ -168,9 +217,50 @@ type ProcessorContainer struct { UserAgent *UserAgentProcessor `json:"user_agent,omitempty"` } +// MarshalJSON overrides marshalling for types with additional properties +func (s ProcessorContainer) MarshalJSON() ([]byte, error) { + type opt ProcessorContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalProcessorContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalProcessorContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewProcessorContainer returns a ProcessorContainer. func NewProcessorContainer() *ProcessorContainer { - r := &ProcessorContainer{} + r := &ProcessorContainer{ + AdditionalProcessorContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type ProcessorContainerVariant interface { + ProcessorContainerCaster() *ProcessorContainer +} + +func (s *ProcessorContainer) ProcessorContainerCaster() *ProcessorContainer { + return s +} diff --git a/typedapi/types/profile.go b/typedapi/types/profile.go index c8c36c8f3b..9568a777c3 100644 --- a/typedapi/types/profile.go +++ b/typedapi/types/profile.go @@ -16,13 +16,13 @@ // under the License. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Profile type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/profile.ts#L93-L95 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L101-L103 type Profile struct { Shards []ShardProfile `json:"shards"` } @@ -33,3 +33,5 @@ func NewProfile() *Profile { return r } + +// false diff --git a/typedapi/types/property.go b/typedapi/types/property.go index 254752f341..66c0e68dc1 100644 --- a/typedapi/types/property.go +++ b/typedapi/types/property.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -42,10 +42,12 @@ package types // FlattenedProperty // NestedProperty // ObjectProperty +// PassthroughObjectProperty // SemanticTextProperty // SparseVectorProperty // CompletionProperty // ConstantKeywordProperty +// CountedKeywordProperty // FieldAliasProperty // HistogramProperty // IpProperty @@ -72,5 +74,9 @@ package types // LongRangeProperty // IcuCollationProperty // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/Property.ts#L96-L164 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/Property.ts#L119-L189 type Property any + +type PropertyVariant interface { + PropertyCaster() *Property +} diff --git 
a/typedapi/types/publishedclusterstates.go b/typedapi/types/publishedclusterstates.go index 1475389d4a..ae98602dae 100644 --- a/typedapi/types/publishedclusterstates.go +++ b/typedapi/types/publishedclusterstates.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // PublishedClusterStates type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L263-L276 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L263-L276 type PublishedClusterStates struct { // CompatibleDiffs Number of compatible differences between published cluster states. CompatibleDiffs *int64 `json:"compatible_diffs,omitempty"` @@ -112,3 +112,5 @@ func NewPublishedClusterStates() *PublishedClusterStates { return r } + +// false diff --git a/typedapi/types/queries.go b/typedapi/types/queries.go index 843961198e..f5d4e7ca8d 100644 --- a/typedapi/types/queries.go +++ b/typedapi/types/queries.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Queries type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L403-L405 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L417-L419 type Queries struct { Cache *CacheQueries `json:"cache,omitempty"` } @@ -33,3 +33,13 @@ func NewQueries() *Queries { return r } + +// true + +type QueriesVariant interface { + QueriesCaster() *Queries +} + +func (s *Queries) QueriesCaster() *Queries { + return s +} diff --git a/typedapi/types/query.go b/typedapi/types/query.go index 34b357d8fc..56d2fdf027 100644 --- a/typedapi/types/query.go +++ b/typedapi/types/query.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,8 +30,9 @@ import ( // Query type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/abstractions.ts#L102-L427 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/abstractions.ts#L103-L434 type Query struct { + AdditionalQueryProperty map[string]json.RawMessage `json:"-"` // Bool matches documents matching boolean combinations of other queries. Bool *BoolQuery `json:"bool,omitempty"` // Boosting Returns documents matching a `positive` query while reducing the relevance @@ -68,7 +69,10 @@ type Query struct { // GeoDistance Matches `geo_point` and `geo_shape` values within a given distance of a // geopoint. 
GeoDistance *GeoDistanceQuery `json:"geo_distance,omitempty"` - GeoPolygon *GeoPolygonQuery `json:"geo_polygon,omitempty"` + // GeoGrid Matches `geo_point` and `geo_shape` values that intersect a grid cell from a + // GeoGrid aggregation. + GeoGrid map[string]GeoGridQuery `json:"geo_grid,omitempty"` + GeoPolygon *GeoPolygonQuery `json:"geo_polygon,omitempty"` // GeoShape Filter documents indexed using either the `geo_shape` or the `geo_point` // type. GeoShape *GeoShapeQuery `json:"geo_shape,omitempty"` @@ -290,6 +294,14 @@ func (s *Query) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "GeoDistance", err) } + case "geo_grid": + if s.GeoGrid == nil { + s.GeoGrid = make(map[string]GeoGridQuery, 0) + } + if err := dec.Decode(&s.GeoGrid); err != nil { + return fmt.Errorf("%s | %w", "GeoGrid", err) + } + case "geo_polygon": if err := dec.Decode(&s.GeoPolygon); err != nil { return fmt.Errorf("%s | %w", "GeoPolygon", err) @@ -583,31 +595,85 @@ func (s *Query) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Wrapper", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalQueryProperty == nil { + s.AdditionalQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalQueryProperty", err) + } + s.AdditionalQueryProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s Query) MarshalJSON() ([]byte, error) { + type opt Query + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + 
delete(tmp, "AdditionalQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewQuery returns a Query. func NewQuery() *Query { r := &Query{ - Common: make(map[string]CommonTermsQuery, 0), - Fuzzy: make(map[string]FuzzyQuery, 0), - Intervals: make(map[string]IntervalsQuery, 0), - Match: make(map[string]MatchQuery, 0), - MatchBoolPrefix: make(map[string]MatchBoolPrefixQuery, 0), - MatchPhrase: make(map[string]MatchPhraseQuery, 0), - MatchPhrasePrefix: make(map[string]MatchPhrasePrefixQuery, 0), - Prefix: make(map[string]PrefixQuery, 0), - Range: make(map[string]RangeQuery, 0), - Regexp: make(map[string]RegexpQuery, 0), - SpanTerm: make(map[string]SpanTermQuery, 0), - Term: make(map[string]TermQuery, 0), - TermsSet: make(map[string]TermsSetQuery, 0), - TextExpansion: make(map[string]TextExpansionQuery, 0), - WeightedTokens: make(map[string]WeightedTokensQuery, 0), - Wildcard: make(map[string]WildcardQuery, 0), + AdditionalQueryProperty: make(map[string]json.RawMessage), + Common: make(map[string]CommonTermsQuery), + Fuzzy: make(map[string]FuzzyQuery), + GeoGrid: make(map[string]GeoGridQuery), + Intervals: make(map[string]IntervalsQuery), + Match: make(map[string]MatchQuery), + MatchBoolPrefix: make(map[string]MatchBoolPrefixQuery), + MatchPhrase: make(map[string]MatchPhraseQuery), + MatchPhrasePrefix: make(map[string]MatchPhrasePrefixQuery), + Prefix: make(map[string]PrefixQuery), + Range: make(map[string]RangeQuery), + Regexp: make(map[string]RegexpQuery), + SpanTerm: make(map[string]SpanTermQuery), + Term: make(map[string]TermQuery), + TermsSet: make(map[string]TermsSetQuery), + TextExpansion: make(map[string]TextExpansionQuery), + WeightedTokens: make(map[string]WeightedTokensQuery), + Wildcard: make(map[string]WildcardQuery), } return r } + +// true + +type QueryVariant interface { + QueryCaster() *Query +} + +func (s *Query) QueryCaster() *Query { + return s +} diff --git 
a/typedapi/types/querybreakdown.go b/typedapi/types/querybreakdown.go index 3132bd7ee3..6f7e7d0aec 100644 --- a/typedapi/types/querybreakdown.go +++ b/typedapi/types/querybreakdown.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // QueryBreakdown type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/profile.ts#L97-L116 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L105-L126 type QueryBreakdown struct { Advance int64 `json:"advance"` AdvanceCount int64 `json:"advance_count"` @@ -39,6 +39,8 @@ type QueryBreakdown struct { BuildScorerCount int64 `json:"build_scorer_count"` ComputeMaxScore int64 `json:"compute_max_score"` ComputeMaxScoreCount int64 `json:"compute_max_score_count"` + CountWeight int64 `json:"count_weight"` + CountWeightCount int64 `json:"count_weight_count"` CreateWeight int64 `json:"create_weight"` CreateWeightCount int64 `json:"create_weight_count"` Match int64 `json:"match"` @@ -158,6 +160,36 @@ func (s *QueryBreakdown) UnmarshalJSON(data []byte) error { s.ComputeMaxScoreCount = f } + case "count_weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CountWeight", err) + } + s.CountWeight = value + case float64: + f := int64(v) + s.CountWeight = f + } + + case "count_weight_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + 
return fmt.Errorf("%s | %w", "CountWeightCount", err) + } + s.CountWeightCount = value + case float64: + f := int64(v) + s.CountWeightCount = f + } + case "create_weight": var tmp any dec.Decode(&tmp) @@ -349,3 +381,5 @@ func NewQueryBreakdown() *QueryBreakdown { return r } + +// false diff --git a/typedapi/types/querycachestats.go b/typedapi/types/querycachestats.go index 7bc83afb05..341be7b2cb 100644 --- a/typedapi/types/querycachestats.go +++ b/typedapi/types/querycachestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // QueryCacheStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L192-L226 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L195-L229 type QueryCacheStats struct { // CacheCount Total number of entries added to the query cache across all shards assigned // to selected nodes. @@ -195,3 +195,5 @@ func NewQueryCacheStats() *QueryCacheStats { return r } + +// false diff --git a/typedapi/types/queryprofile.go b/typedapi/types/queryprofile.go index f1458a6187..daabb52058 100644 --- a/typedapi/types/queryprofile.go +++ b/typedapi/types/queryprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // QueryProfile type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/profile.ts#L118-L124 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L128-L134 type QueryProfile struct { Breakdown QueryBreakdown `json:"breakdown"` Children []QueryProfile `json:"children,omitempty"` @@ -105,3 +105,5 @@ func NewQueryProfile() *QueryProfile { return r } + +// false diff --git a/typedapi/types/queryrole.go b/typedapi/types/queryrole.go index 1e5439d6df..9efa24425d 100644 --- a/typedapi/types/queryrole.go +++ b/typedapi/types/queryrole.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // QueryRole type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_role/types.ts#L103-L109 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_role/types.ts#L103-L109 type QueryRole struct { // Applications A list of application privilege entries Applications []ApplicationPrivileges `json:"applications,omitempty"` @@ -53,9 +53,17 @@ type QueryRole struct { Metadata Metadata `json:"metadata,omitempty"` // Name Name of the role. Name string `json:"name"` - // RunAs A list of users that the API keys can impersonate. *Note*: in Serverless, the - // run-as feature is disabled. For API compatibility, you can still specify an - // empty `run_as` field, but a non-empty list will be rejected. + // RemoteCluster A list of cluster permissions for remote clusters. 
+ // NOTE: This is limited a subset of the cluster permissions. + RemoteCluster []RemoteClusterPrivileges `json:"remote_cluster,omitempty"` + // RemoteIndices A list of indices permissions for remote clusters. + RemoteIndices []RemoteIndicesPrivileges `json:"remote_indices,omitempty"` + // Restriction Restriction for when the role descriptor is allowed to be effective. + Restriction *Restriction `json:"restriction,omitempty"` + // RunAs A list of users that the API keys can impersonate. + // NOTE: In Elastic Cloud Serverless, the run-as feature is disabled. + // For API compatibility, you can still specify an empty `run_as` field, but a + // non-empty list will be rejected. RunAs []string `json:"run_as,omitempty"` Sort_ []FieldValue `json:"_sort,omitempty"` TransientMetadata map[string]json.RawMessage `json:"transient_metadata,omitempty"` @@ -136,6 +144,21 @@ func (s *QueryRole) UnmarshalJSON(data []byte) error { } s.Name = o + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + + case "remote_indices": + if err := dec.Decode(&s.RemoteIndices); err != nil { + return fmt.Errorf("%s | %w", "RemoteIndices", err) + } + + case "restriction": + if err := dec.Decode(&s.Restriction); err != nil { + return fmt.Errorf("%s | %w", "Restriction", err) + } + case "run_as": if err := dec.Decode(&s.RunAs); err != nil { return fmt.Errorf("%s | %w", "RunAs", err) @@ -162,8 +185,10 @@ func (s *QueryRole) UnmarshalJSON(data []byte) error { // NewQueryRole returns a QueryRole. func NewQueryRole() *QueryRole { r := &QueryRole{ - TransientMetadata: make(map[string]json.RawMessage, 0), + TransientMetadata: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/queryrule.go b/typedapi/types/queryrule.go index 53de7da314..f5bf78022f 100644 --- a/typedapi/types/queryrule.go +++ b/typedapi/types/queryrule.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,13 +33,23 @@ import ( // QueryRule type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/_types/QueryRuleset.ts#L36-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/_types/QueryRuleset.ts#L36-L58 type QueryRule struct { - Actions QueryRuleActions `json:"actions"` - Criteria []QueryRuleCriteria `json:"criteria"` - Priority *int `json:"priority,omitempty"` - RuleId string `json:"rule_id"` - Type queryruletype.QueryRuleType `json:"type"` + // Actions The actions to take when the rule is matched. + // The format of this action depends on the rule type. + Actions QueryRuleActions `json:"actions"` + // Criteria The criteria that must be met for the rule to be applied. + // If multiple criteria are specified for a rule, all criteria must be met for + // the rule to be applied. + Criteria []QueryRuleCriteria `json:"criteria"` + Priority *int `json:"priority,omitempty"` + // RuleId A unique identifier for the rule. + RuleId string `json:"rule_id"` + // Type The type of rule. + // `pinned` will identify and pin specific documents to the top of search + // results. + // `exclude` will exclude specific documents from search results. 
+ Type queryruletype.QueryRuleType `json:"type"` } func (s *QueryRule) UnmarshalJSON(data []byte) error { @@ -115,3 +125,13 @@ func NewQueryRule() *QueryRule { return r } + +// true + +type QueryRuleVariant interface { + QueryRuleCaster() *QueryRule +} + +func (s *QueryRule) QueryRuleCaster() *QueryRule { + return s +} diff --git a/typedapi/types/queryruleactions.go b/typedapi/types/queryruleactions.go index 1126bdc552..fa695caa70 100644 --- a/typedapi/types/queryruleactions.go +++ b/typedapi/types/queryruleactions.go @@ -16,16 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // QueryRuleActions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/_types/QueryRuleset.ts#L69-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/_types/QueryRuleset.ts#L110-L126 type QueryRuleActions struct { + // Docs The documents to apply the rule to. + // Only one of `ids` or `docs` may be specified and at least one must be + // specified. + // There is a maximum value of 100 documents in a rule. + // You can specify the following attributes for each document: + // + // * `_index`: The index of the document to pin. + // * `_id`: The unique document ID. Docs []PinnedDoc `json:"docs,omitempty"` - Ids []string `json:"ids,omitempty"` + // Ids The unique document IDs of the documents to apply the rule to. + // Only one of `ids` or `docs` may be specified and at least one must be + // specified. + Ids []string `json:"ids,omitempty"` } // NewQueryRuleActions returns a QueryRuleActions. 
@@ -34,3 +45,13 @@ func NewQueryRuleActions() *QueryRuleActions { return r } + +// true + +type QueryRuleActionsVariant interface { + QueryRuleActionsCaster() *QueryRuleActions +} + +func (s *QueryRuleActions) QueryRuleActionsCaster() *QueryRuleActions { + return s +} diff --git a/typedapi/types/queryrulecriteria.go b/typedapi/types/queryrulecriteria.go index 3032c99db8..45a7fbb627 100644 --- a/typedapi/types/queryrulecriteria.go +++ b/typedapi/types/queryrulecriteria.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,11 +33,40 @@ import ( // QueryRuleCriteria type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/_types/QueryRuleset.ts#L48-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/_types/QueryRuleset.ts#L65-L93 type QueryRuleCriteria struct { - Metadata *string `json:"metadata,omitempty"` - Type queryrulecriteriatype.QueryRuleCriteriaType `json:"type"` - Values []json.RawMessage `json:"values,omitempty"` + // Metadata The metadata field to match against. + // This metadata will be used to match against `match_criteria` sent in the + // rule. + // It is required for all criteria types except `always`. + Metadata *string `json:"metadata,omitempty"` + // Type The type of criteria. The following criteria types are supported: + // + // * `always`: Matches all queries, regardless of input. + // * `contains`: Matches that contain this value anywhere in the field meet the + // criteria defined by the rule. Only applicable for string values. 
+ // * `exact`: Only exact matches meet the criteria defined by the rule. + // Applicable for string or numerical values. + // * `fuzzy`: Exact matches or matches within the allowed Levenshtein Edit + // Distance meet the criteria defined by the rule. Only applicable for string + // values. + // * `gt`: Matches with a value greater than this value meet the criteria + // defined by the rule. Only applicable for numerical values. + // * `gte`: Matches with a value greater than or equal to this value meet the + // criteria defined by the rule. Only applicable for numerical values. + // * `lt`: Matches with a value less than this value meet the criteria defined + // by the rule. Only applicable for numerical values. + // * `lte`: Matches with a value less than or equal to this value meet the + // criteria defined by the rule. Only applicable for numerical values. + // * `prefix`: Matches that start with this value meet the criteria defined by + // the rule. Only applicable for string values. + // * `suffix`: Matches that end with this value meet the criteria defined by the + // rule. Only applicable for string values. + Type queryrulecriteriatype.QueryRuleCriteriaType `json:"type"` + // Values The values to match against the `metadata` field. + // Only one value must match for the criteria to be met. + // It is required for all criteria types except `always`. 
+ Values []json.RawMessage `json:"values,omitempty"` } func (s *QueryRuleCriteria) UnmarshalJSON(data []byte) error { @@ -88,3 +117,13 @@ func NewQueryRuleCriteria() *QueryRuleCriteria { return r } + +// true + +type QueryRuleCriteriaVariant interface { + QueryRuleCriteriaCaster() *QueryRuleCriteria +} + +func (s *QueryRuleCriteria) QueryRuleCriteriaCaster() *QueryRuleCriteria { + return s +} diff --git a/typedapi/types/queryrulesetlistitem.go b/typedapi/types/queryrulesetlistitem.go index 1f5bc2afc7..e54e460202 100644 --- a/typedapi/types/queryrulesetlistitem.go +++ b/typedapi/types/queryrulesetlistitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,13 +31,20 @@ import ( // QueryRulesetListItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/list_rulesets/types.ts#L23-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/list_rulesets/types.ts#L23-L44 type QueryRulesetListItem struct { - // RuleCriteriaTypesCounts A map of criteria type to the number of rules of that type + // RuleCriteriaTypesCounts A map of criteria type (for example, `exact`) to the number of rules of that + // type. + // + // NOTE: The counts in `rule_criteria_types_counts` may be larger than the value + // of `rule_total_count` because a rule may have multiple criteria. RuleCriteriaTypesCounts map[string]int `json:"rule_criteria_types_counts"` - // RuleTotalCount The number of rules associated with this ruleset + // RuleTotalCount The number of rules associated with the ruleset. 
RuleTotalCount int `json:"rule_total_count"` - // RulesetId Ruleset unique identifier + // RuleTypeCounts A map of rule type (for example, `pinned`) to the number of rules of that + // type. + RuleTypeCounts map[string]int `json:"rule_type_counts"` + // RulesetId A unique identifier for the ruleset. RulesetId string `json:"ruleset_id"` } @@ -80,6 +87,14 @@ func (s *QueryRulesetListItem) UnmarshalJSON(data []byte) error { s.RuleTotalCount = f } + case "rule_type_counts": + if s.RuleTypeCounts == nil { + s.RuleTypeCounts = make(map[string]int, 0) + } + if err := dec.Decode(&s.RuleTypeCounts); err != nil { + return fmt.Errorf("%s | %w", "RuleTypeCounts", err) + } + case "ruleset_id": if err := dec.Decode(&s.RulesetId); err != nil { return fmt.Errorf("%s | %w", "RulesetId", err) @@ -93,8 +108,11 @@ func (s *QueryRulesetListItem) UnmarshalJSON(data []byte) error { // NewQueryRulesetListItem returns a QueryRulesetListItem. func NewQueryRulesetListItem() *QueryRulesetListItem { r := &QueryRulesetListItem{ - RuleCriteriaTypesCounts: make(map[string]int, 0), + RuleCriteriaTypesCounts: make(map[string]int), + RuleTypeCounts: make(map[string]int), } return r } + +// false diff --git a/typedapi/types/queryruleset.go b/typedapi/types/queryrulesetmatchedrule.go similarity index 61% rename from typedapi/types/queryruleset.go rename to typedapi/types/queryrulesetmatchedrule.go index 2eba2b3fc3..15edf3af98 100644 --- a/typedapi/types/queryruleset.go +++ b/typedapi/types/queryrulesetmatchedrule.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,17 +28,17 @@ import ( "io" ) -// QueryRuleset type. +// QueryRulesetMatchedRule type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/query_rules/_types/QueryRuleset.ts#L25-L34 -type QueryRuleset struct { - // Rules Rules associated with the query ruleset - Rules []QueryRule `json:"rules"` - // RulesetId Query Ruleset unique identifier +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/query_rules/test/QueryRulesetTestResponse.ts#L30-L39 +type QueryRulesetMatchedRule struct { + // RuleId Rule unique identifier within that ruleset + RuleId string `json:"rule_id"` + // RulesetId Ruleset unique identifier RulesetId string `json:"ruleset_id"` } -func (s *QueryRuleset) UnmarshalJSON(data []byte) error { +func (s *QueryRulesetMatchedRule) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -53,9 +53,9 @@ func (s *QueryRuleset) UnmarshalJSON(data []byte) error { switch t { - case "rules": - if err := dec.Decode(&s.Rules); err != nil { - return fmt.Errorf("%s | %w", "Rules", err) + case "rule_id": + if err := dec.Decode(&s.RuleId); err != nil { + return fmt.Errorf("%s | %w", "RuleId", err) } case "ruleset_id": @@ -68,9 +68,11 @@ func (s *QueryRuleset) UnmarshalJSON(data []byte) error { return nil } -// NewQueryRuleset returns a QueryRuleset. -func NewQueryRuleset() *QueryRuleset { - r := &QueryRuleset{} +// NewQueryRulesetMatchedRule returns a QueryRulesetMatchedRule. +func NewQueryRulesetMatchedRule() *QueryRulesetMatchedRule { + r := &QueryRulesetMatchedRule{} return r } + +// false diff --git a/typedapi/types/querystringquery.go b/typedapi/types/querystringquery.go index 1cb3e1a471..777f35adab 100644 --- a/typedapi/types/querystringquery.go +++ b/typedapi/types/querystringquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // QueryStringQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L580-L700 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L598-L721 type QueryStringQuery struct { // AllowLeadingWildcard If `true`, the wildcard characters `*` and `?` are allowed as the first // character of the query string. @@ -433,3 +433,13 @@ func NewQueryStringQuery() *QueryStringQuery { return r } + +// true + +type QueryStringQueryVariant interface { + QueryStringQueryCaster() *QueryStringQuery +} + +func (s *QueryStringQuery) QueryStringQueryCaster() *QueryStringQuery { + return s +} diff --git a/typedapi/types/queryuser.go b/typedapi/types/queryuser.go index fc0c73a2bf..7841c98ead 100644 --- a/typedapi/types/queryuser.go +++ b/typedapi/types/queryuser.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // QueryUser type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_user/types.ts#L103-L105 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_user/types.ts#L103-L105 type QueryUser struct { Email *string `json:"email,omitempty"` Enabled bool `json:"enabled"` @@ -125,3 +125,5 @@ func NewQueryUser() *QueryUser { return r } + +// false diff --git a/typedapi/types/queryvectorbuilder.go b/typedapi/types/queryvectorbuilder.go index 8f03af4a49..9d419e315a 100644 --- a/typedapi/types/queryvectorbuilder.go +++ b/typedapi/types/queryvectorbuilder.go @@ -16,20 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // QueryVectorBuilder type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Knn.ts#L69-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Knn.ts#L89-L92 type QueryVectorBuilder struct { - TextEmbedding *TextEmbedding `json:"text_embedding,omitempty"` + AdditionalQueryVectorBuilderProperty map[string]json.RawMessage `json:"-"` + TextEmbedding *TextEmbedding `json:"text_embedding,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s QueryVectorBuilder) MarshalJSON() ([]byte, error) { + type opt QueryVectorBuilder + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalQueryVectorBuilderProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalQueryVectorBuilderProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewQueryVectorBuilder returns a QueryVectorBuilder. func NewQueryVectorBuilder() *QueryVectorBuilder { - r := &QueryVectorBuilder{} + r := &QueryVectorBuilder{ + AdditionalQueryVectorBuilderProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type QueryVectorBuilderVariant interface { + QueryVectorBuilderCaster() *QueryVectorBuilder +} + +func (s *QueryVectorBuilder) QueryVectorBuilderCaster() *QueryVectorBuilder { + return s +} diff --git a/typedapi/types/querywatch.go b/typedapi/types/querywatch.go index 4eb0d48c25..b211130285 100644 --- a/typedapi/types/querywatch.go +++ b/typedapi/types/querywatch.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // QueryWatch type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Watch.ts#L58-L64 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Watch.ts#L58-L64 type QueryWatch struct { Id_ string `json:"_id"` PrimaryTerm_ *int `json:"_primary_term,omitempty"` @@ -102,3 +102,5 @@ func NewQueryWatch() *QueryWatch { return r } + +// false diff --git a/typedapi/types/questionansweringinferenceoptions.go b/typedapi/types/questionansweringinferenceoptions.go index 36556b07a5..7263b66a3c 100644 --- a/typedapi/types/questionansweringinferenceoptions.go +++ b/typedapi/types/questionansweringinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // QuestionAnsweringInferenceOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L282-L292 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L270-L280 type QuestionAnsweringInferenceOptions struct { // MaxAnswerLength The maximum answer length to consider MaxAnswerLength *int `json:"max_answer_length,omitempty"` @@ -119,3 +119,13 @@ func NewQuestionAnsweringInferenceOptions() *QuestionAnsweringInferenceOptions { return r } + +// true + +type QuestionAnsweringInferenceOptionsVariant interface { + QuestionAnsweringInferenceOptionsCaster() *QuestionAnsweringInferenceOptions +} + +func (s *QuestionAnsweringInferenceOptions) QuestionAnsweringInferenceOptionsCaster() *QuestionAnsweringInferenceOptions { + return s +} diff --git a/typedapi/types/questionansweringinferenceupdateoptions.go b/typedapi/types/questionansweringinferenceupdateoptions.go index 51d78d7da3..6d8ed15e66 100644 --- a/typedapi/types/questionansweringinferenceupdateoptions.go +++ b/typedapi/types/questionansweringinferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // QuestionAnsweringInferenceUpdateOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L420-L431 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L408-L419 type QuestionAnsweringInferenceUpdateOptions struct { // MaxAnswerLength The maximum answer length to consider for extraction MaxAnswerLength *int `json:"max_answer_length,omitempty"` @@ -133,3 +133,13 @@ func NewQuestionAnsweringInferenceUpdateOptions() *QuestionAnsweringInferenceUpd return r } + +// true + +type QuestionAnsweringInferenceUpdateOptionsVariant interface { + QuestionAnsweringInferenceUpdateOptionsCaster() *QuestionAnsweringInferenceUpdateOptions +} + +func (s *QuestionAnsweringInferenceUpdateOptions) QuestionAnsweringInferenceUpdateOptionsCaster() *QuestionAnsweringInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/randomsampleraggregation.go b/typedapi/types/randomsampleraggregation.go new file mode 100644 index 0000000000..bb1fbac3e1 --- /dev/null +++ b/typedapi/types/randomsampleraggregation.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RandomSamplerAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L749-L769 +type RandomSamplerAggregation struct { + // Probability The probability that a document will be included in the aggregated data. + // Must be greater than 0, less than 0.5, or exactly 1. + // The lower the probability, the fewer documents are matched. + Probability Float64 `json:"probability"` + // Seed The seed to generate the random sampling of documents. + // When a seed is provided, the random subset of documents is the same between + // calls. + Seed *int `json:"seed,omitempty"` + // ShardSeed When combined with seed, setting shard_seed ensures 100% consistent sampling + // over shards where data is exactly the same. 
+ ShardSeed *int `json:"shard_seed,omitempty"` +} + +func (s *RandomSamplerAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "probability": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Probability", err) + } + f := Float64(value) + s.Probability = f + case float64: + f := Float64(v) + s.Probability = f + } + + case "seed": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Seed", err) + } + s.Seed = &value + case float64: + f := int(v) + s.Seed = &f + } + + case "shard_seed": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardSeed", err) + } + s.ShardSeed = &value + case float64: + f := int(v) + s.ShardSeed = &f + } + + } + } + return nil +} + +// NewRandomSamplerAggregation returns a RandomSamplerAggregation. +func NewRandomSamplerAggregation() *RandomSamplerAggregation { + r := &RandomSamplerAggregation{} + + return r +} + +// true + +type RandomSamplerAggregationVariant interface { + RandomSamplerAggregationCaster() *RandomSamplerAggregation +} + +func (s *RandomSamplerAggregation) RandomSamplerAggregationCaster() *RandomSamplerAggregation { + return s +} diff --git a/typedapi/types/randomscorefunction.go b/typedapi/types/randomscorefunction.go index 87e3e52c9f..10d5310efa 100644 --- a/typedapi/types/randomscorefunction.go +++ b/typedapi/types/randomscorefunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RandomScoreFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L131-L134 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L144-L147 type RandomScoreFunction struct { Field *string `json:"field,omitempty"` Seed string `json:"seed,omitempty"` @@ -80,3 +80,13 @@ func NewRandomScoreFunction() *RandomScoreFunction { return r } + +// true + +type RandomScoreFunctionVariant interface { + RandomScoreFunctionCaster() *RandomScoreFunction +} + +func (s *RandomScoreFunction) RandomScoreFunctionCaster() *RandomScoreFunction { + return s +} diff --git a/typedapi/types/rangeaggregate.go b/typedapi/types/rangeaggregate.go index 4e6e73678e..b166f4243f 100644 --- a/typedapi/types/rangeaggregate.go +++ b/typedapi/types/rangeaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // RangeAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L535-L536 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L594-L598 type RangeAggregate struct { Buckets BucketsRangeBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewRangeAggregate() *RangeAggregate { return r } + +// false diff --git a/typedapi/types/rangeaggregation.go b/typedapi/types/rangeaggregation.go index d91925fccf..acb4134311 100644 --- a/typedapi/types/rangeaggregation.go +++ b/typedapi/types/rangeaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RangeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L652-L672 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L669-L689 type RangeAggregation struct { // Field The date field whose values are use to build ranges. 
Field *string `json:"field,omitempty"` @@ -130,3 +130,13 @@ func NewRangeAggregation() *RangeAggregation { return r } + +// true + +type RangeAggregationVariant interface { + RangeAggregationCaster() *RangeAggregation +} + +func (s *RangeAggregation) RangeAggregationCaster() *RangeAggregation { + return s +} diff --git a/typedapi/types/rangebucket.go b/typedapi/types/rangebucket.go index 6aec346e40..382a4adfe4 100644 --- a/typedapi/types/rangebucket.go +++ b/typedapi/types/rangebucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // RangeBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L538-L545 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L600-L607 type RangeBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -559,6 +559,13 @@ func (s *RangeBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -692,8 +699,10 @@ func (s RangeBucket) MarshalJSON() ([]byte, error) { // NewRangeBucket returns a RangeBucket. 
func NewRangeBucket() *RangeBucket { r := &RangeBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/rangequery.go b/typedapi/types/rangequery.go index c6ab6d37f7..5afde56d97 100644 --- a/typedapi/types/rangequery.go +++ b/typedapi/types/rangequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -27,5 +27,9 @@ package types // NumberRangeQuery // TermRangeQuery // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L161-L170 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L176-L186 type RangeQuery any + +type RangeQueryVariant interface { + RangeQueryCaster() *RangeQuery +} diff --git a/typedapi/types/rangequerybasedatemath.go b/typedapi/types/rangequerybasedatemath.go deleted file mode 100644 index 2e96acbdba..0000000000 --- a/typedapi/types/rangequerybasedatemath.go +++ /dev/null @@ -1,147 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation" -) - -// RangeQueryBaseDateMath type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L109-L133 -type RangeQueryBaseDateMath struct { - // Boost Floating point number used to decrease or increase the relevance scores of - // the query. - // Boost values are relative to the default value of 1.0. - // A boost value between 0 and 1.0 decreases the relevance score. - // A value greater than 1.0 increases the relevance score. - Boost *float32 `json:"boost,omitempty"` - From *string `json:"from,omitempty"` - // Gt Greater than. - Gt *string `json:"gt,omitempty"` - // Gte Greater than or equal to. - Gte *string `json:"gte,omitempty"` - // Lt Less than. - Lt *string `json:"lt,omitempty"` - // Lte Less than or equal to. - Lte *string `json:"lte,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - // Relation Indicates how the range query matches values for `range` fields. 
- Relation *rangerelation.RangeRelation `json:"relation,omitempty"` - To *string `json:"to,omitempty"` -} - -func (s *RangeQueryBaseDateMath) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "boost": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 32) - if err != nil { - return fmt.Errorf("%s | %w", "Boost", err) - } - f := float32(value) - s.Boost = &f - case float64: - f := float32(v) - s.Boost = &f - } - - case "from": - if err := dec.Decode(&s.From); err != nil { - return fmt.Errorf("%s | %w", "From", err) - } - - case "gt": - if err := dec.Decode(&s.Gt); err != nil { - return fmt.Errorf("%s | %w", "Gt", err) - } - - case "gte": - if err := dec.Decode(&s.Gte); err != nil { - return fmt.Errorf("%s | %w", "Gte", err) - } - - case "lt": - if err := dec.Decode(&s.Lt); err != nil { - return fmt.Errorf("%s | %w", "Lt", err) - } - - case "lte": - if err := dec.Decode(&s.Lte); err != nil { - return fmt.Errorf("%s | %w", "Lte", err) - } - - case "_name": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "QueryName_", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.QueryName_ = &o - - case "relation": - if err := dec.Decode(&s.Relation); err != nil { - return fmt.Errorf("%s | %w", "Relation", err) - } - - case "to": - if err := dec.Decode(&s.To); err != nil { - return fmt.Errorf("%s | %w", "To", err) - } - - } - } - return nil -} - -// NewRangeQueryBaseDateMath returns a RangeQueryBaseDateMath. 
-func NewRangeQueryBaseDateMath() *RangeQueryBaseDateMath { - r := &RangeQueryBaseDateMath{} - - return r -} diff --git a/typedapi/types/rangequerybasedouble.go b/typedapi/types/rangequerybasedouble.go deleted file mode 100644 index d85e802a55..0000000000 --- a/typedapi/types/rangequerybasedouble.go +++ /dev/null @@ -1,191 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation" -) - -// RangeQueryBasedouble type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L109-L133 -type RangeQueryBasedouble struct { - // Boost Floating point number used to decrease or increase the relevance scores of - // the query. - // Boost values are relative to the default value of 1.0. - // A boost value between 0 and 1.0 decreases the relevance score. 
- // A value greater than 1.0 increases the relevance score. - Boost *float32 `json:"boost,omitempty"` - From *Float64 `json:"from,omitempty"` - // Gt Greater than. - Gt *Float64 `json:"gt,omitempty"` - // Gte Greater than or equal to. - Gte *Float64 `json:"gte,omitempty"` - // Lt Less than. - Lt *Float64 `json:"lt,omitempty"` - // Lte Less than or equal to. - Lte *Float64 `json:"lte,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - // Relation Indicates how the range query matches values for `range` fields. - Relation *rangerelation.RangeRelation `json:"relation,omitempty"` - To *Float64 `json:"to,omitempty"` -} - -func (s *RangeQueryBasedouble) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "boost": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 32) - if err != nil { - return fmt.Errorf("%s | %w", "Boost", err) - } - f := float32(value) - s.Boost = &f - case float64: - f := float32(v) - s.Boost = &f - } - - case "from": - if err := dec.Decode(&s.From); err != nil { - return fmt.Errorf("%s | %w", "From", err) - } - - case "gt": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Gt", err) - } - f := Float64(value) - s.Gt = &f - case float64: - f := Float64(v) - s.Gt = &f - } - - case "gte": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Gte", err) - } - f := Float64(value) - s.Gte = &f - case float64: - f := Float64(v) - s.Gte = &f - } - - case "lt": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - 
return fmt.Errorf("%s | %w", "Lt", err) - } - f := Float64(value) - s.Lt = &f - case float64: - f := Float64(v) - s.Lt = &f - } - - case "lte": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Lte", err) - } - f := Float64(value) - s.Lte = &f - case float64: - f := Float64(v) - s.Lte = &f - } - - case "_name": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "QueryName_", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.QueryName_ = &o - - case "relation": - if err := dec.Decode(&s.Relation); err != nil { - return fmt.Errorf("%s | %w", "Relation", err) - } - - case "to": - if err := dec.Decode(&s.To); err != nil { - return fmt.Errorf("%s | %w", "To", err) - } - - } - } - return nil -} - -// NewRangeQueryBasedouble returns a RangeQueryBasedouble. -func NewRangeQueryBasedouble() *RangeQueryBasedouble { - r := &RangeQueryBasedouble{} - - return r -} diff --git a/typedapi/types/rangequerybasestring.go b/typedapi/types/rangequerybasestring.go deleted file mode 100644 index 4b26094a59..0000000000 --- a/typedapi/types/rangequerybasestring.go +++ /dev/null @@ -1,189 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation" -) - -// RangeQueryBasestring type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L109-L133 -type RangeQueryBasestring struct { - // Boost Floating point number used to decrease or increase the relevance scores of - // the query. - // Boost values are relative to the default value of 1.0. - // A boost value between 0 and 1.0 decreases the relevance score. - // A value greater than 1.0 increases the relevance score. - Boost *float32 `json:"boost,omitempty"` - From *string `json:"from,omitempty"` - // Gt Greater than. - Gt *string `json:"gt,omitempty"` - // Gte Greater than or equal to. - Gte *string `json:"gte,omitempty"` - // Lt Less than. - Lt *string `json:"lt,omitempty"` - // Lte Less than or equal to. - Lte *string `json:"lte,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - // Relation Indicates how the range query matches values for `range` fields. 
- Relation *rangerelation.RangeRelation `json:"relation,omitempty"` - To *string `json:"to,omitempty"` -} - -func (s *RangeQueryBasestring) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "boost": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 32) - if err != nil { - return fmt.Errorf("%s | %w", "Boost", err) - } - f := float32(value) - s.Boost = &f - case float64: - f := float32(v) - s.Boost = &f - } - - case "from": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "From", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.From = &o - - case "gt": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Gt", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Gt = &o - - case "gte": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Gte", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Gte = &o - - case "lt": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Lt", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Lt = &o - - case "lte": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Lte", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Lte = &o - - case "_name": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "QueryName_", err) - } - o := 
string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.QueryName_ = &o - - case "relation": - if err := dec.Decode(&s.Relation); err != nil { - return fmt.Errorf("%s | %w", "Relation", err) - } - - case "to": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "To", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.To = &o - - } - } - return nil -} - -// NewRangeQueryBasestring returns a RangeQueryBasestring. -func NewRangeQueryBasestring() *RangeQueryBasestring { - r := &RangeQueryBasestring{} - - return r -} diff --git a/typedapi/types/rankcontainer.go b/typedapi/types/rankcontainer.go index bb6804dc81..4c7d987462 100644 --- a/typedapi/types/rankcontainer.go +++ b/typedapi/types/rankcontainer.go @@ -16,21 +16,68 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // RankContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Rank.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Rank.ts#L22-L28 type RankContainer struct { + AdditionalRankContainerProperty map[string]json.RawMessage `json:"-"` // Rrf The reciprocal rank fusion parameters Rrf *RrfRank `json:"rrf,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s RankContainer) MarshalJSON() ([]byte, error) { + type opt RankContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalRankContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalRankContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewRankContainer returns a RankContainer. func NewRankContainer() *RankContainer { - r := &RankContainer{} + r := &RankContainer{ + AdditionalRankContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type RankContainerVariant interface { + RankContainerCaster() *RankContainer +} + +func (s *RankContainer) RankContainerCaster() *RankContainer { + return s +} diff --git a/typedapi/types/rankeddocument.go b/typedapi/types/rankeddocument.go index cd3d43ff34..a249bbacdc 100644 --- a/typedapi/types/rankeddocument.go +++ b/typedapi/types/rankeddocument.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,11 +31,11 @@ import ( // RankedDocument type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/_types/Results.ts#L67-L77 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/_types/Results.ts#L90-L100 type RankedDocument struct { - Index int `json:"index"` - Score float32 `json:"score"` - Text *string `json:"text,omitempty"` + Index int `json:"index"` + RelevanceScore float32 `json:"relevance_score"` + Text *string `json:"text,omitempty"` } func (s *RankedDocument) UnmarshalJSON(data []byte) error { @@ -69,20 +69,20 @@ func (s *RankedDocument) UnmarshalJSON(data []byte) error { s.Index = f } - case "score": + case "relevance_score": var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: value, err := strconv.ParseFloat(v, 32) if err != nil { - return fmt.Errorf("%s | %w", "Score", err) + return fmt.Errorf("%s | %w", "RelevanceScore", err) } f := float32(value) - s.Score = f + s.RelevanceScore = f case float64: f := float32(v) - s.Score = f + s.RelevanceScore = f } case "text": @@ -108,3 +108,5 @@ func NewRankedDocument() *RankedDocument { return r } + +// false diff --git a/typedapi/types/rankevalhit.go b/typedapi/types/rankevalhit.go index cd134f7e4b..b34dc891da 100644 --- a/typedapi/types/rankevalhit.go +++ b/typedapi/types/rankevalhit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RankEvalHit type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L144-L148 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/types.ts#L144-L148 type RankEvalHit struct { Id_ string `json:"_id"` Index_ string `json:"_index"` @@ -90,3 +90,5 @@ func NewRankEvalHit() *RankEvalHit { return r } + +// false diff --git a/typedapi/types/rankevalhititem.go b/typedapi/types/rankevalhititem.go index 6cca11ff19..a8d9c9417b 100644 --- a/typedapi/types/rankevalhititem.go +++ b/typedapi/types/rankevalhititem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // RankEvalHitItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L139-L142 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/types.ts#L139-L142 type RankEvalHitItem struct { Hit RankEvalHit `json:"hit"` Rating *Float64 `json:"rating,omitempty"` @@ -72,3 +72,5 @@ func NewRankEvalHitItem() *RankEvalHitItem { return r } + +// false diff --git a/typedapi/types/rankevalmetric.go b/typedapi/types/rankevalmetric.go index f0bce08b6b..fe47ce96b3 100644 --- a/typedapi/types/rankevalmetric.go +++ b/typedapi/types/rankevalmetric.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // RankEvalMetric type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L90-L96 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/types.ts#L90-L96 type RankEvalMetric struct { Dcg *RankEvalMetricDiscountedCumulativeGain `json:"dcg,omitempty"` ExpectedReciprocalRank *RankEvalMetricExpectedReciprocalRank `json:"expected_reciprocal_rank,omitempty"` @@ -37,3 +37,13 @@ func NewRankEvalMetric() *RankEvalMetric { return r } + +// true + +type RankEvalMetricVariant interface { + RankEvalMetricCaster() *RankEvalMetric +} + +func (s *RankEvalMetric) RankEvalMetricCaster() *RankEvalMetric { + return s +} diff --git a/typedapi/types/rankevalmetricdetail.go b/typedapi/types/rankevalmetricdetail.go index 5421aad022..7563113ec3 100644 --- a/typedapi/types/rankevalmetricdetail.go +++ b/typedapi/types/rankevalmetricdetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RankEvalMetricDetail type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L128-L137 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/types.ts#L128-L137 type RankEvalMetricDetail struct { // Hits The hits section shows a grouping of the search results with their supplied // ratings @@ -106,8 +106,10 @@ func (s *RankEvalMetricDetail) UnmarshalJSON(data []byte) error { // NewRankEvalMetricDetail returns a RankEvalMetricDetail. 
func NewRankEvalMetricDetail() *RankEvalMetricDetail { r := &RankEvalMetricDetail{ - MetricDetails: make(map[string]map[string]json.RawMessage, 0), + MetricDetails: make(map[string]map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/rankevalmetricdiscountedcumulativegain.go b/typedapi/types/rankevalmetricdiscountedcumulativegain.go index a04d7ca029..64ab92337e 100644 --- a/typedapi/types/rankevalmetricdiscountedcumulativegain.go +++ b/typedapi/types/rankevalmetricdiscountedcumulativegain.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RankEvalMetricDiscountedCumulativeGain type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L66-L77 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/types.ts#L66-L77 type RankEvalMetricDiscountedCumulativeGain struct { // K Sets the maximum number of documents retrieved per query. This value will act // in place of the usual size parameter in the query. 
@@ -96,3 +96,13 @@ func NewRankEvalMetricDiscountedCumulativeGain() *RankEvalMetricDiscountedCumula return r } + +// true + +type RankEvalMetricDiscountedCumulativeGainVariant interface { + RankEvalMetricDiscountedCumulativeGainCaster() *RankEvalMetricDiscountedCumulativeGain +} + +func (s *RankEvalMetricDiscountedCumulativeGain) RankEvalMetricDiscountedCumulativeGainCaster() *RankEvalMetricDiscountedCumulativeGain { + return s +} diff --git a/typedapi/types/rankevalmetricexpectedreciprocalrank.go b/typedapi/types/rankevalmetricexpectedreciprocalrank.go index f6ab9a4228..d35fb36a6e 100644 --- a/typedapi/types/rankevalmetricexpectedreciprocalrank.go +++ b/typedapi/types/rankevalmetricexpectedreciprocalrank.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RankEvalMetricExpectedReciprocalRank type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L79-L88 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/types.ts#L79-L88 type RankEvalMetricExpectedReciprocalRank struct { // K Sets the maximum number of documents retrieved per query. This value will act // in place of the usual size parameter in the query. 
@@ -98,3 +98,13 @@ func NewRankEvalMetricExpectedReciprocalRank() *RankEvalMetricExpectedReciprocal return r } + +// true + +type RankEvalMetricExpectedReciprocalRankVariant interface { + RankEvalMetricExpectedReciprocalRankCaster() *RankEvalMetricExpectedReciprocalRank +} + +func (s *RankEvalMetricExpectedReciprocalRank) RankEvalMetricExpectedReciprocalRankCaster() *RankEvalMetricExpectedReciprocalRank { + return s +} diff --git a/typedapi/types/rankevalmetricmeanreciprocalrank.go b/typedapi/types/rankevalmetricmeanreciprocalrank.go index 02e0777ea4..a51db76539 100644 --- a/typedapi/types/rankevalmetricmeanreciprocalrank.go +++ b/typedapi/types/rankevalmetricmeanreciprocalrank.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RankEvalMetricMeanReciprocalRank type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L60-L64 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/types.ts#L60-L64 type RankEvalMetricMeanReciprocalRank struct { // K Sets the maximum number of documents retrieved per query. This value will act // in place of the usual size parameter in the query. 
@@ -99,3 +99,13 @@ func NewRankEvalMetricMeanReciprocalRank() *RankEvalMetricMeanReciprocalRank { return r } + +// true + +type RankEvalMetricMeanReciprocalRankVariant interface { + RankEvalMetricMeanReciprocalRankCaster() *RankEvalMetricMeanReciprocalRank +} + +func (s *RankEvalMetricMeanReciprocalRank) RankEvalMetricMeanReciprocalRankCaster() *RankEvalMetricMeanReciprocalRank { + return s +} diff --git a/typedapi/types/rankevalmetricprecision.go b/typedapi/types/rankevalmetricprecision.go index 77fe859f5c..89cdd97838 100644 --- a/typedapi/types/rankevalmetricprecision.go +++ b/typedapi/types/rankevalmetricprecision.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RankEvalMetricPrecision type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L42-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/types.ts#L42-L52 type RankEvalMetricPrecision struct { // IgnoreUnlabeled Controls how unlabeled documents in the search results are counted. 
If set to // true, unlabeled documents are ignored and neither count as relevant or @@ -117,3 +117,13 @@ func NewRankEvalMetricPrecision() *RankEvalMetricPrecision { return r } + +// true + +type RankEvalMetricPrecisionVariant interface { + RankEvalMetricPrecisionCaster() *RankEvalMetricPrecision +} + +func (s *RankEvalMetricPrecision) RankEvalMetricPrecisionCaster() *RankEvalMetricPrecision { + return s +} diff --git a/typedapi/types/rankevalmetricrecall.go b/typedapi/types/rankevalmetricrecall.go index afc7189b8c..60b9cbae98 100644 --- a/typedapi/types/rankevalmetricrecall.go +++ b/typedapi/types/rankevalmetricrecall.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RankEvalMetricRecall type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L54-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/types.ts#L54-L58 type RankEvalMetricRecall struct { // K Sets the maximum number of documents retrieved per query. This value will act // in place of the usual size parameter in the query. 
@@ -99,3 +99,13 @@ func NewRankEvalMetricRecall() *RankEvalMetricRecall { return r } + +// true + +type RankEvalMetricRecallVariant interface { + RankEvalMetricRecallCaster() *RankEvalMetricRecall +} + +func (s *RankEvalMetricRecall) RankEvalMetricRecallCaster() *RankEvalMetricRecall { + return s +} diff --git a/typedapi/types/rankevalquery.go b/typedapi/types/rankevalquery.go index 25eef0223b..7cc04b41b0 100644 --- a/typedapi/types/rankevalquery.go +++ b/typedapi/types/rankevalquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RankEvalQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L111-L117 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/types.ts#L111-L117 type RankEvalQuery struct { Query Query `json:"query"` Size *int `json:"size,omitempty"` @@ -89,3 +89,13 @@ func NewRankEvalQuery() *RankEvalQuery { return r } + +// true + +type RankEvalQueryVariant interface { + RankEvalQueryCaster() *RankEvalQuery +} + +func (s *RankEvalQuery) RankEvalQueryCaster() *RankEvalQuery { + return s +} diff --git a/typedapi/types/rankevalrequestitem.go b/typedapi/types/rankevalrequestitem.go index c47770e27a..b86568e016 100644 --- a/typedapi/types/rankevalrequestitem.go +++ b/typedapi/types/rankevalrequestitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // RankEvalRequestItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L98-L109 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/types.ts#L98-L109 type RankEvalRequestItem struct { // Id The search request’s ID, used to group result details later. Id string `json:"id"` @@ -95,8 +95,18 @@ func (s *RankEvalRequestItem) UnmarshalJSON(data []byte) error { // NewRankEvalRequestItem returns a RankEvalRequestItem. func NewRankEvalRequestItem() *RankEvalRequestItem { r := &RankEvalRequestItem{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type RankEvalRequestItemVariant interface { + RankEvalRequestItemCaster() *RankEvalRequestItem +} + +func (s *RankEvalRequestItem) RankEvalRequestItemCaster() *RankEvalRequestItem { + return s +} diff --git a/typedapi/types/rankfeaturefunctionlinear.go b/typedapi/types/rankfeaturefunctionlinear.go index db946f6339..04ccb56601 100644 --- a/typedapi/types/rankfeaturefunctionlinear.go +++ b/typedapi/types/rankfeaturefunctionlinear.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // RankFeatureFunctionLinear type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L275-L275 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L282-L282 type RankFeatureFunctionLinear struct { } @@ -32,3 +32,13 @@ func NewRankFeatureFunctionLinear() *RankFeatureFunctionLinear { return r } + +// true + +type RankFeatureFunctionLinearVariant interface { + RankFeatureFunctionLinearCaster() *RankFeatureFunctionLinear +} + +func (s *RankFeatureFunctionLinear) RankFeatureFunctionLinearCaster() *RankFeatureFunctionLinear { + return s +} diff --git a/typedapi/types/rankfeaturefunctionlogarithm.go b/typedapi/types/rankfeaturefunctionlogarithm.go index a0cea016c7..c7e215a7d9 100644 --- a/typedapi/types/rankfeaturefunctionlogarithm.go +++ b/typedapi/types/rankfeaturefunctionlogarithm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RankFeatureFunctionLogarithm type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L277-L282 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L284-L289 type RankFeatureFunctionLogarithm struct { // ScalingFactor Configurable scaling factor. 
ScalingFactor float32 `json:"scaling_factor"` @@ -79,3 +79,13 @@ func NewRankFeatureFunctionLogarithm() *RankFeatureFunctionLogarithm { return r } + +// true + +type RankFeatureFunctionLogarithmVariant interface { + RankFeatureFunctionLogarithmCaster() *RankFeatureFunctionLogarithm +} + +func (s *RankFeatureFunctionLogarithm) RankFeatureFunctionLogarithmCaster() *RankFeatureFunctionLogarithm { + return s +} diff --git a/typedapi/types/rankfeaturefunctionsaturation.go b/typedapi/types/rankfeaturefunctionsaturation.go index 586209b849..8151bfe006 100644 --- a/typedapi/types/rankfeaturefunctionsaturation.go +++ b/typedapi/types/rankfeaturefunctionsaturation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RankFeatureFunctionSaturation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L284-L289 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L291-L296 type RankFeatureFunctionSaturation struct { // Pivot Configurable pivot value so that the result will be less than 0.5. 
Pivot *float32 `json:"pivot,omitempty"` @@ -79,3 +79,13 @@ func NewRankFeatureFunctionSaturation() *RankFeatureFunctionSaturation { return r } + +// true + +type RankFeatureFunctionSaturationVariant interface { + RankFeatureFunctionSaturationCaster() *RankFeatureFunctionSaturation +} + +func (s *RankFeatureFunctionSaturation) RankFeatureFunctionSaturationCaster() *RankFeatureFunctionSaturation { + return s +} diff --git a/typedapi/types/rankfeaturefunctionsigmoid.go b/typedapi/types/rankfeaturefunctionsigmoid.go index 51bfd05d3a..cba8da1b5e 100644 --- a/typedapi/types/rankfeaturefunctionsigmoid.go +++ b/typedapi/types/rankfeaturefunctionsigmoid.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RankFeatureFunctionSigmoid type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L291-L300 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L298-L307 type RankFeatureFunctionSigmoid struct { // Exponent Configurable Exponent. 
Exponent float32 `json:"exponent"` @@ -97,3 +97,13 @@ func NewRankFeatureFunctionSigmoid() *RankFeatureFunctionSigmoid { return r } + +// true + +type RankFeatureFunctionSigmoidVariant interface { + RankFeatureFunctionSigmoidCaster() *RankFeatureFunctionSigmoid +} + +func (s *RankFeatureFunctionSigmoid) RankFeatureFunctionSigmoidCaster() *RankFeatureFunctionSigmoid { + return s +} diff --git a/typedapi/types/rankfeatureproperty.go b/typedapi/types/rankfeatureproperty.go index 6c7b111aae..9145011d54 100644 --- a/typedapi/types/rankfeatureproperty.go +++ b/typedapi/types/rankfeatureproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,20 +29,22 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // RankFeatureProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L192-L195 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L196-L199 type RankFeatureProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - PositiveScoreImpact *bool `json:"positive_score_impact,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + PositiveScoreImpact *bool `json:"positive_score_impact,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { @@ -84,301 +86,313 @@ func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { 
- return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -441,306 +455,323 @@ func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) 
} s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo 
:= NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -761,6 +792,7 @@ func (s RankFeatureProperty) MarshalJSON() ([]byte, error) { Meta: s.Meta, PositiveScoreImpact: s.PositiveScoreImpact, Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, } @@ -772,10 +804,20 @@ func (s RankFeatureProperty) MarshalJSON() ([]byte, error) { // NewRankFeatureProperty returns a RankFeatureProperty. 
func NewRankFeatureProperty() *RankFeatureProperty { r := &RankFeatureProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type RankFeaturePropertyVariant interface { + RankFeaturePropertyCaster() *RankFeatureProperty +} + +func (s *RankFeatureProperty) RankFeaturePropertyCaster() *RankFeatureProperty { + return s +} diff --git a/typedapi/types/rankfeaturequery.go b/typedapi/types/rankfeaturequery.go index e63d625685..fc70b16364 100644 --- a/typedapi/types/rankfeaturequery.go +++ b/typedapi/types/rankfeaturequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RankFeatureQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L302-L325 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L309-L335 type RankFeatureQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -135,3 +135,13 @@ func NewRankFeatureQuery() *RankFeatureQuery { return r } + +// true + +type RankFeatureQueryVariant interface { + RankFeatureQueryCaster() *RankFeatureQuery +} + +func (s *RankFeatureQuery) RankFeatureQueryCaster() *RankFeatureQuery { + return s +} diff --git a/typedapi/types/rankfeaturesproperty.go b/typedapi/types/rankfeaturesproperty.go index 6c4c7bfce1..07a025fddd 100644 --- a/typedapi/types/rankfeaturesproperty.go +++ b/typedapi/types/rankfeaturesproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,20 +29,22 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // RankFeaturesProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L197-L200 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L201-L204 type RankFeaturesProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - PositiveScoreImpact *bool `json:"positive_score_impact,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + PositiveScoreImpact *bool `json:"positive_score_impact,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { @@ -84,301 +86,313 @@ func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { 
- return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -441,306 +455,323 @@ func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) 
} s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo 
:= NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -761,6 +792,7 @@ func (s RankFeaturesProperty) MarshalJSON() ([]byte, error) { Meta: s.Meta, PositiveScoreImpact: s.PositiveScoreImpact, Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, } @@ -772,10 +804,20 @@ func (s RankFeaturesProperty) MarshalJSON() ([]byte, error) { // NewRankFeaturesProperty returns a RankFeaturesProperty. 
func NewRankFeaturesProperty() *RankFeaturesProperty { r := &RankFeaturesProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type RankFeaturesPropertyVariant interface { + RankFeaturesPropertyCaster() *RankFeaturesProperty +} + +func (s *RankFeaturesProperty) RankFeaturesPropertyCaster() *RankFeaturesProperty { + return s +} diff --git a/typedapi/types/raretermsaggregation.go b/typedapi/types/raretermsaggregation.go index 8bc47bc5e4..1dcd0b1be4 100644 --- a/typedapi/types/raretermsaggregation.go +++ b/typedapi/types/raretermsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RareTermsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L689-L719 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L706-L739 type RareTermsAggregation struct { // Exclude Terms that should be excluded from the aggregation. 
Exclude []string `json:"exclude,omitempty"` @@ -181,3 +181,13 @@ func NewRareTermsAggregation() *RareTermsAggregation { return r } + +// true + +type RareTermsAggregationVariant interface { + RareTermsAggregationCaster() *RareTermsAggregation +} + +func (s *RareTermsAggregation) RareTermsAggregationCaster() *RareTermsAggregation { + return s +} diff --git a/typedapi/types/rateaggregate.go b/typedapi/types/rateaggregate.go index 225faaf594..ef87dcefbc 100644 --- a/typedapi/types/rateaggregate.go +++ b/typedapi/types/rateaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RateAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L748-L752 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L847-L854 type RateAggregate struct { Meta Metadata `json:"meta,omitempty"` Value Float64 `json:"value"` @@ -97,3 +97,5 @@ func NewRateAggregate() *RateAggregate { return r } + +// false diff --git a/typedapi/types/rateaggregation.go b/typedapi/types/rateaggregation.go index 6382e24793..9baa5980c7 100644 --- a/typedapi/types/rateaggregation.go +++ b/typedapi/types/rateaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // RateAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L230-L241 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L239-L250 type RateAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -113,3 +113,13 @@ func NewRateAggregation() *RateAggregation { return r } + +// true + +type RateAggregationVariant interface { + RateAggregationCaster() *RateAggregation +} + +func (s *RateAggregation) RateAggregationCaster() *RateAggregation { + return s +} diff --git a/typedapi/types/ratelimitsetting.go b/typedapi/types/ratelimitsetting.go new file mode 100644 index 0000000000..29fe6ed50b --- /dev/null +++ b/typedapi/types/ratelimitsetting.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RateLimitSetting type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/_types/Services.ts#L95-L100 +type RateLimitSetting struct { + // RequestsPerMinute The number of requests allowed per minute. + RequestsPerMinute *int `json:"requests_per_minute,omitempty"` +} + +func (s *RateLimitSetting) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "requests_per_minute": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RequestsPerMinute", err) + } + s.RequestsPerMinute = &value + case float64: + f := int(v) + s.RequestsPerMinute = &f + } + + } + } + return nil +} + +// NewRateLimitSetting returns a RateLimitSetting. +func NewRateLimitSetting() *RateLimitSetting { + r := &RateLimitSetting{} + + return r +} + +// true + +type RateLimitSettingVariant interface { + RateLimitSettingCaster() *RateLimitSetting +} + +func (s *RateLimitSetting) RateLimitSettingCaster() *RateLimitSetting { + return s +} diff --git a/typedapi/types/readblobdetails.go b/typedapi/types/readblobdetails.go new file mode 100644 index 0000000000..1b46418c6e --- /dev/null +++ b/typedapi/types/readblobdetails.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReadBlobDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L204-L248 +type ReadBlobDetails struct { + // BeforeWriteComplete Indicates whether the read operation may have started before the write + // operation was complete. + BeforeWriteComplete *bool `json:"before_write_complete,omitempty"` + // Elapsed The length of time spent reading the blob. + // If the blob was not found, this detail is omitted. + Elapsed Duration `json:"elapsed,omitempty"` + // ElapsedNanos The length of time spent reading the blob, in nanoseconds. + // If the blob was not found, this detail is omitted. + ElapsedNanos *int64 `json:"elapsed_nanos,omitempty"` + // FirstByteTime The length of time waiting for the first byte of the read operation to be + // received. + // If the blob was not found, this detail is omitted. 
+ FirstByteTime Duration `json:"first_byte_time,omitempty"` + // FirstByteTimeNanos The length of time waiting for the first byte of the read operation to be + // received, in nanoseconds. + // If the blob was not found, this detail is omitted. + FirstByteTimeNanos int64 `json:"first_byte_time_nanos"` + // Found Indicates whether the blob was found by the read operation. + // If the read was started before the write completed or the write was ended + // before completion, it might be false. + Found bool `json:"found"` + // Node The node that performed the read operation. + Node SnapshotNodeInfo `json:"node"` + // Throttled The length of time spent waiting due to the `max_restore_bytes_per_sec` or + // `indices.recovery.max_bytes_per_sec` throttles during the read of the blob. + // If the blob was not found, this detail is omitted. + Throttled Duration `json:"throttled,omitempty"` + // ThrottledNanos The length of time spent waiting due to the `max_restore_bytes_per_sec` or + // `indices.recovery.max_bytes_per_sec` throttles during the read of the blob, + // in nanoseconds. + // If the blob was not found, this detail is omitted. 
+ ThrottledNanos *int64 `json:"throttled_nanos,omitempty"` +} + +func (s *ReadBlobDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "before_write_complete": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "BeforeWriteComplete", err) + } + s.BeforeWriteComplete = &value + case bool: + s.BeforeWriteComplete = &v + } + + case "elapsed": + if err := dec.Decode(&s.Elapsed); err != nil { + return fmt.Errorf("%s | %w", "Elapsed", err) + } + + case "elapsed_nanos": + if err := dec.Decode(&s.ElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "ElapsedNanos", err) + } + + case "first_byte_time": + if err := dec.Decode(&s.FirstByteTime); err != nil { + return fmt.Errorf("%s | %w", "FirstByteTime", err) + } + + case "first_byte_time_nanos": + if err := dec.Decode(&s.FirstByteTimeNanos); err != nil { + return fmt.Errorf("%s | %w", "FirstByteTimeNanos", err) + } + + case "found": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Found", err) + } + s.Found = value + case bool: + s.Found = v + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "throttled": + if err := dec.Decode(&s.Throttled); err != nil { + return fmt.Errorf("%s | %w", "Throttled", err) + } + + case "throttled_nanos": + if err := dec.Decode(&s.ThrottledNanos); err != nil { + return fmt.Errorf("%s | %w", "ThrottledNanos", err) + } + + } + } + return nil +} + +// NewReadBlobDetails returns a ReadBlobDetails. 
+func NewReadBlobDetails() *ReadBlobDetails { + r := &ReadBlobDetails{} + + return r +} + +// false diff --git a/typedapi/types/readexception.go b/typedapi/types/readexception.go index cf35c0f69e..163c1a6b19 100644 --- a/typedapi/types/readexception.go +++ b/typedapi/types/readexception.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,11 +31,14 @@ import ( // ReadException type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ccr/_types/FollowIndexStats.ts#L71-L75 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ccr/_types/FollowIndexStats.ts#L111-L118 type ReadException struct { + // Exception The exception that caused the read to fail. Exception ErrorCause `json:"exception"` - FromSeqNo int64 `json:"from_seq_no"` - Retries int `json:"retries"` + // FromSeqNo The starting sequence number of the batch requested from the leader. + FromSeqNo int64 `json:"from_seq_no"` + // Retries The number of times the batch has been retried. + Retries int `json:"retries"` } func (s *ReadException) UnmarshalJSON(data []byte) error { @@ -90,3 +93,5 @@ func NewReadException() *ReadException { return r } + +// false diff --git a/typedapi/types/readonlyurlrepository.go b/typedapi/types/readonlyurlrepository.go index 636f50f83b..1a261b8292 100644 --- a/typedapi/types/readonlyurlrepository.go +++ b/typedapi/types/readonlyurlrepository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ReadOnlyUrlRepository type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotRepository.ts#L60-L63 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotRepository.ts#L60-L63 type ReadOnlyUrlRepository struct { Settings ReadOnlyUrlRepositorySettings `json:"settings"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewReadOnlyUrlRepository() *ReadOnlyUrlRepository { return r } + +// true + +type ReadOnlyUrlRepositoryVariant interface { + ReadOnlyUrlRepositoryCaster() *ReadOnlyUrlRepository +} + +func (s *ReadOnlyUrlRepository) ReadOnlyUrlRepositoryCaster() *ReadOnlyUrlRepository { + return s +} diff --git a/typedapi/types/readonlyurlrepositorysettings.go b/typedapi/types/readonlyurlrepositorysettings.go index 6f115c5be6..ff13de8c62 100644 --- a/typedapi/types/readonlyurlrepositorysettings.go +++ b/typedapi/types/readonlyurlrepositorysettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ReadOnlyUrlRepositorySettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotRepository.ts#L110-L115 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotRepository.ts#L110-L115 type ReadOnlyUrlRepositorySettings struct { ChunkSize ByteSize `json:"chunk_size,omitempty"` Compress *bool `json:"compress,omitempty"` @@ -147,3 +147,13 @@ func NewReadOnlyUrlRepositorySettings() *ReadOnlyUrlRepositorySettings { return r } + +// true + +type ReadOnlyUrlRepositorySettingsVariant interface { + ReadOnlyUrlRepositorySettingsCaster() *ReadOnlyUrlRepositorySettings +} + +func (s *ReadOnlyUrlRepositorySettings) ReadOnlyUrlRepositorySettingsCaster() *ReadOnlyUrlRepositorySettings { + return s +} diff --git a/typedapi/types/readsummaryinfo.go b/typedapi/types/readsummaryinfo.go new file mode 100644 index 0000000000..f395a0ed51 --- /dev/null +++ b/typedapi/types/readsummaryinfo.go @@ -0,0 +1,169 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReadSummaryInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L115-L160 +type ReadSummaryInfo struct { + // Count The number of read operations performed in the test. + Count int `json:"count"` + // MaxWait The maximum time spent waiting for the first byte of any read request to be + // received. + MaxWait Duration `json:"max_wait"` + // MaxWaitNanos The maximum time spent waiting for the first byte of any read request to be + // received, in nanoseconds. + MaxWaitNanos int64 `json:"max_wait_nanos"` + // TotalElapsed The total elapsed time spent on reading blobs in the test. + TotalElapsed Duration `json:"total_elapsed"` + // TotalElapsedNanos The total elapsed time spent on reading blobs in the test, in nanoseconds. + TotalElapsedNanos int64 `json:"total_elapsed_nanos"` + // TotalSize The total size of all the blobs or partial blobs read in the test. + TotalSize ByteSize `json:"total_size"` + // TotalSizeBytes The total size of all the blobs or partial blobs read in the test, in bytes. + TotalSizeBytes int64 `json:"total_size_bytes"` + // TotalThrottled The total time spent waiting due to the `max_restore_bytes_per_sec` or + // `indices.recovery.max_bytes_per_sec` throttles. + TotalThrottled Duration `json:"total_throttled"` + // TotalThrottledNanos The total time spent waiting due to the `max_restore_bytes_per_sec` or + // `indices.recovery.max_bytes_per_sec` throttles, in nanoseconds. + TotalThrottledNanos int64 `json:"total_throttled_nanos"` + // TotalWait The total time spent waiting for the first byte of each read request to be + // received. 
+ TotalWait Duration `json:"total_wait"` + // TotalWaitNanos The total time spent waiting for the first byte of each read request to be + // received, in nanoseconds. + TotalWaitNanos int64 `json:"total_wait_nanos"` +} + +func (s *ReadSummaryInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "max_wait": + if err := dec.Decode(&s.MaxWait); err != nil { + return fmt.Errorf("%s | %w", "MaxWait", err) + } + + case "max_wait_nanos": + if err := dec.Decode(&s.MaxWaitNanos); err != nil { + return fmt.Errorf("%s | %w", "MaxWaitNanos", err) + } + + case "total_elapsed": + if err := dec.Decode(&s.TotalElapsed); err != nil { + return fmt.Errorf("%s | %w", "TotalElapsed", err) + } + + case "total_elapsed_nanos": + if err := dec.Decode(&s.TotalElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "TotalElapsedNanos", err) + } + + case "total_size": + if err := dec.Decode(&s.TotalSize); err != nil { + return fmt.Errorf("%s | %w", "TotalSize", err) + } + + case "total_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeBytes", err) + } + s.TotalSizeBytes = value + case float64: + f := int64(v) + s.TotalSizeBytes = f + } + + case "total_throttled": + if err := dec.Decode(&s.TotalThrottled); err != nil { + return fmt.Errorf("%s | %w", "TotalThrottled", err) + } + + case "total_throttled_nanos": + if err := dec.Decode(&s.TotalThrottledNanos); err != nil { + return fmt.Errorf("%s | %w", "TotalThrottledNanos", err) + 
} + + case "total_wait": + if err := dec.Decode(&s.TotalWait); err != nil { + return fmt.Errorf("%s | %w", "TotalWait", err) + } + + case "total_wait_nanos": + if err := dec.Decode(&s.TotalWaitNanos); err != nil { + return fmt.Errorf("%s | %w", "TotalWaitNanos", err) + } + + } + } + return nil +} + +// NewReadSummaryInfo returns a ReadSummaryInfo. +func NewReadSummaryInfo() *ReadSummaryInfo { + r := &ReadSummaryInfo{} + + return r +} + +// false diff --git a/typedapi/types/realmcache.go b/typedapi/types/realmcache.go index 3f73a15772..98083e5aac 100644 --- a/typedapi/types/realmcache.go +++ b/typedapi/types/realmcache.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RealmCache type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L266-L268 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L276-L278 type RealmCache struct { Size int64 `json:"size"` } @@ -77,3 +77,5 @@ func NewRealmCache() *RealmCache { return r } + +// false diff --git a/typedapi/types/realminfo.go b/typedapi/types/realminfo.go index 7c981c104e..4cee1a3ff2 100644 --- a/typedapi/types/realminfo.go +++ b/typedapi/types/realminfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RealmInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/RealmInfo.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/RealmInfo.ts#L22-L25 type RealmInfo struct { Name string `json:"name"` Type string `json:"type"` @@ -80,3 +80,5 @@ func NewRealmInfo() *RealmInfo { return r } + +// false diff --git a/typedapi/types/recording.go b/typedapi/types/recording.go index 341c39c977..a7f39807fc 100644 --- a/typedapi/types/recording.go +++ b/typedapi/types/recording.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Recording type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L225-L230 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L225-L230 type Recording struct { CumulativeExecutionCount *int64 `json:"cumulative_execution_count,omitempty"` CumulativeExecutionTime Duration `json:"cumulative_execution_time,omitempty"` @@ -102,3 +102,5 @@ func NewRecording() *Recording { return r } + +// false diff --git a/typedapi/types/recoverybytes.go b/typedapi/types/recoverybytes.go index 95f9bb685f..bb3e01f298 100644 --- a/typedapi/types/recoverybytes.go +++ b/typedapi/types/recoverybytes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // RecoveryBytes type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/recovery/types.ts#L38-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/recovery/types.ts#L38-L48 type RecoveryBytes struct { Percent Percentage `json:"percent"` Recovered ByteSize `json:"recovered,omitempty"` @@ -114,3 +114,5 @@ func NewRecoveryBytes() *RecoveryBytes { return r } + +// false diff --git a/typedapi/types/recoveryfiles.go b/typedapi/types/recoveryfiles.go index 4730d31719..df66ccbaa7 100644 --- a/typedapi/types/recoveryfiles.go +++ b/typedapi/types/recoveryfiles.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RecoveryFiles type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/recovery/types.ts#L56-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/recovery/types.ts#L56-L62 type RecoveryFiles struct { Details []FileDetails `json:"details,omitempty"` Percent Percentage `json:"percent"` @@ -121,3 +121,5 @@ func NewRecoveryFiles() *RecoveryFiles { return r } + +// false diff --git a/typedapi/types/recoveryindexstatus.go b/typedapi/types/recoveryindexstatus.go index 2bbbe8815a..8147f5439f 100644 --- a/typedapi/types/recoveryindexstatus.go +++ b/typedapi/types/recoveryindexstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // RecoveryIndexStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/recovery/types.ts#L64-L74 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/recovery/types.ts#L64-L74 type RecoveryIndexStatus struct { Bytes *RecoveryBytes `json:"bytes,omitempty"` Files RecoveryFiles `json:"files"` @@ -114,3 +114,5 @@ func NewRecoveryIndexStatus() *RecoveryIndexStatus { return r } + +// false diff --git a/typedapi/types/recoveryorigin.go b/typedapi/types/recoveryorigin.go index 43d5305133..76827190d7 100644 --- a/typedapi/types/recoveryorigin.go +++ b/typedapi/types/recoveryorigin.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RecoveryOrigin type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/recovery/types.ts#L76-L89 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/recovery/types.ts#L76-L89 type RecoveryOrigin struct { BootstrapNewHistoryUuid *bool `json:"bootstrap_new_history_uuid,omitempty"` Host *string `json:"host,omitempty"` @@ -149,3 +149,5 @@ func NewRecoveryOrigin() *RecoveryOrigin { return r } + +// false diff --git a/typedapi/types/recoveryrecord.go b/typedapi/types/recoveryrecord.go index b41ac155c5..ee2e44a51b 100644 --- a/typedapi/types/recoveryrecord.go +++ b/typedapi/types/recoveryrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RecoveryRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/recovery/types.ts#L24-L155 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/recovery/types.ts#L24-L155 type RecoveryRecord struct { // Bytes The number of bytes to recover. 
Bytes *string `json:"bytes,omitempty"` @@ -362,3 +362,5 @@ func NewRecoveryRecord() *RecoveryRecord { return r } + +// false diff --git a/typedapi/types/recoverystartstatus.go b/typedapi/types/recoverystartstatus.go index 0f8d55a9b2..016de50e73 100644 --- a/typedapi/types/recoverystartstatus.go +++ b/typedapi/types/recoverystartstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // RecoveryStartStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/recovery/types.ts#L91-L96 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/recovery/types.ts#L91-L96 type RecoveryStartStatus struct { CheckIndexTime Duration `json:"check_index_time,omitempty"` CheckIndexTimeInMillis int64 `json:"check_index_time_in_millis"` @@ -84,3 +84,5 @@ func NewRecoveryStartStatus() *RecoveryStartStatus { return r } + +// false diff --git a/typedapi/types/recoverystats.go b/typedapi/types/recoverystats.go index 8f85cf4c3e..26de7dd891 100644 --- a/typedapi/types/recoverystats.go +++ b/typedapi/types/recoverystats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RecoveryStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L228-L233 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L231-L236 type RecoveryStats struct { CurrentAsSource int64 `json:"current_as_source"` CurrentAsTarget int64 `json:"current_as_target"` @@ -105,3 +105,5 @@ func NewRecoveryStats() *RecoveryStats { return r } + +// false diff --git a/typedapi/types/recoverystatus.go b/typedapi/types/recoverystatus.go index 34b49aaea0..abbf281e24 100644 --- a/typedapi/types/recoverystatus.go +++ b/typedapi/types/recoverystatus.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // RecoveryStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/recovery/types.ts#L98-L100 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/recovery/types.ts#L98-L100 type RecoveryStatus struct { Shards []ShardRecovery `json:"shards"` } @@ -33,3 +33,5 @@ func NewRecoveryStatus() *RecoveryStatus { return r } + +// false diff --git a/typedapi/types/redact.go b/typedapi/types/redact.go new file mode 100644 index 0000000000..20c3d03dd6 --- /dev/null +++ b/typedapi/types/redact.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Redact type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Simulation.ts#L39-L44 +type Redact struct { + // IsRedacted_ indicates if document has been redacted + IsRedacted_ bool `json:"_is_redacted"` +} + +func (s *Redact) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_is_redacted": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsRedacted_", err) + } + s.IsRedacted_ = value + case bool: + s.IsRedacted_ = v + } + + } + } + return nil +} + +// NewRedact returns a Redact. 
+func NewRedact() *Redact { + r := &Redact{} + + return r +} + +// false diff --git a/typedapi/types/redactprocessor.go b/typedapi/types/redactprocessor.go new file mode 100644 index 0000000000..d3a68858e7 --- /dev/null +++ b/typedapi/types/redactprocessor.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RedactProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1279-L1320 +type RedactProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to be redacted + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. 
+ IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + PatternDefinitions map[string]string `json:"pattern_definitions,omitempty"` + // Patterns A list of grok expressions to match and redact named captures with + Patterns []string `json:"patterns"` + // Prefix Start a redacted section with this token + Prefix *string `json:"prefix,omitempty"` + // SkipIfUnlicensed If `true` and the current license does not support running redact processors, + // then the processor quietly exits without modifying the document + SkipIfUnlicensed *bool `json:"skip_if_unlicensed,omitempty"` + // Suffix End a redacted section with this token + Suffix *string `json:"suffix,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. 
+ Tag *string `json:"tag,omitempty"` + // TraceRedact If `true` then ingest metadata `_ingest._redact._is_redacted` is set to + // `true` if the document has been redacted + TraceRedact *bool `json:"trace_redact,omitempty"` +} + +func (s *RedactProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "pattern_definitions": + if s.PatternDefinitions == nil { + s.PatternDefinitions = make(map[string]string, 0) + } + if err := dec.Decode(&s.PatternDefinitions); err != nil { + return fmt.Errorf("%s | %w", 
"PatternDefinitions", err) + } + + case "patterns": + if err := dec.Decode(&s.Patterns); err != nil { + return fmt.Errorf("%s | %w", "Patterns", err) + } + + case "prefix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Prefix", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Prefix = &o + + case "skip_if_unlicensed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SkipIfUnlicensed", err) + } + s.SkipIfUnlicensed = &value + case bool: + s.SkipIfUnlicensed = &v + } + + case "suffix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Suffix", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Suffix = &o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "trace_redact": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TraceRedact", err) + } + s.TraceRedact = &value + case bool: + s.TraceRedact = &v + } + + } + } + return nil +} + +// NewRedactProcessor returns a RedactProcessor. 
+func NewRedactProcessor() *RedactProcessor { + r := &RedactProcessor{ + PatternDefinitions: make(map[string]string), + } + + return r +} + +// true + +type RedactProcessorVariant interface { + RedactProcessorCaster() *RedactProcessor +} + +func (s *RedactProcessor) RedactProcessorCaster() *RedactProcessor { + return s +} diff --git a/typedapi/types/refreshstats.go b/typedapi/types/refreshstats.go index aea151abc7..29954729ff 100644 --- a/typedapi/types/refreshstats.go +++ b/typedapi/types/refreshstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RefreshStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L235-L242 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L238-L245 type RefreshStats struct { ExternalTotal int64 `json:"external_total"` ExternalTotalTimeInMillis int64 `json:"external_total_time_in_millis"` @@ -127,3 +127,5 @@ func NewRefreshStats() *RefreshStats { return r } + +// false diff --git a/typedapi/types/regexoptions.go b/typedapi/types/regexoptions.go index cbc598ad00..a479a5cb04 100644 --- a/typedapi/types/regexoptions.go +++ b/typedapi/types/regexoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RegexOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L183-L194 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L183-L194 type RegexOptions struct { // Flags Optional operators for the regular expression. Flags string `json:"flags,omitempty"` @@ -93,3 +93,13 @@ func NewRegexOptions() *RegexOptions { return r } + +// true + +type RegexOptionsVariant interface { + RegexOptionsCaster() *RegexOptions +} + +func (s *RegexOptions) RegexOptionsCaster() *RegexOptions { + return s +} diff --git a/typedapi/types/regexpquery.go b/typedapi/types/regexpquery.go index 07bbf6545d..0b295d5286 100644 --- a/typedapi/types/regexpquery.go +++ b/typedapi/types/regexpquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RegexpQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L187-L217 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L203-L236 type RegexpQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -180,3 +180,13 @@ func NewRegexpQuery() *RegexpQuery { return r } + +// true + +type RegexpQueryVariant interface { + RegexpQueryCaster() *RegexpQuery +} + +func (s *RegexpQuery) RegexpQueryCaster() *RegexpQuery { + return s +} diff --git a/typedapi/types/regexvalidation.go b/typedapi/types/regexvalidation.go index 5a5e477ea1..4a593e8a1b 100644 --- a/typedapi/types/regexvalidation.go +++ b/typedapi/types/regexvalidation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RegexValidation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L78-L81 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L78-L81 type RegexValidation struct { Constraint string `json:"constraint"` Type string `json:"type,omitempty"` @@ -93,3 +93,13 @@ func NewRegexValidation() *RegexValidation { return r } + +// true + +type RegexValidationVariant interface { + RegexValidationCaster() *RegexValidation +} + +func (s *RegexValidation) RegexValidationCaster() *RegexValidation { + return s +} diff --git a/typedapi/types/registereddomainprocessor.go b/typedapi/types/registereddomainprocessor.go new file mode 100644 index 0000000000..0775fc120c --- /dev/null +++ b/typedapi/types/registereddomainprocessor.go @@ -0,0 +1,172 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RegisteredDomainProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1322-L1338 +type RegisteredDomainProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field Field containing the source FQDN. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If true and any required fields are missing, the processor quietly exits + // without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. 
+ Tag *string `json:"tag,omitempty"` + // TargetField Object field containing extracted domain components. If an empty string, + // the processor adds components to the document’s root. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *RegisteredDomainProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { 
+ o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewRegisteredDomainProcessor returns a RegisteredDomainProcessor. +func NewRegisteredDomainProcessor() *RegisteredDomainProcessor { + r := &RegisteredDomainProcessor{} + + return r +} + +// true + +type RegisteredDomainProcessorVariant interface { + RegisteredDomainProcessorCaster() *RegisteredDomainProcessor +} + +func (s *RegisteredDomainProcessor) RegisteredDomainProcessorCaster() *RegisteredDomainProcessor { + return s +} diff --git a/typedapi/types/regressioninferenceoptions.go b/typedapi/types/regressioninferenceoptions.go index 34087d7661..23562325bd 100644 --- a/typedapi/types/regressioninferenceoptions.go +++ b/typedapi/types/regressioninferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RegressionInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L82-L91 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L82-L91 type RegressionInferenceOptions struct { // NumTopFeatureImportanceValues Specifies the maximum number of feature importance values per document. 
NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` @@ -87,3 +87,13 @@ func NewRegressionInferenceOptions() *RegressionInferenceOptions { return r } + +// true + +type RegressionInferenceOptionsVariant interface { + RegressionInferenceOptionsCaster() *RegressionInferenceOptions +} + +func (s *RegressionInferenceOptions) RegressionInferenceOptionsCaster() *RegressionInferenceOptions { + return s +} diff --git a/typedapi/types/reindexdestination.go b/typedapi/types/reindexdestination.go index 90c0ab6f0c..26afe9418c 100644 --- a/typedapi/types/reindexdestination.go +++ b/typedapi/types/reindexdestination.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,20 +34,26 @@ import ( // ReindexDestination type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/reindex/types.ts#L39-L64 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/reindex/types.ts#L39-L67 type ReindexDestination struct { // Index The name of the data stream, index, or index alias you are copying to. Index string `json:"index"` - // OpType Set to `create` to only index documents that do not already exist. - // Important: To reindex to a data stream destination, this argument must be + // OpType If it is `create`, the operation will only index documents that do not + // already exist (also known as "put if absent"). + // + // IMPORTANT: To reindex to a data stream destination, this argument must be // `create`. OpType *optype.OpType `json:"op_type,omitempty"` // Pipeline The name of the pipeline to use. 
Pipeline *string `json:"pipeline,omitempty"` - // Routing By default, a document's routing is preserved unless it’s changed by the + // Routing By default, a document's routing is preserved unless it's changed by the // script. - // Set to `discard` to set routing to `null`, or `=value` to route using the - // specified `value`. + // If it is `keep`, the routing on the bulk request sent for each match is set + // to the routing on the match. + // If it is `discard`, the routing on the bulk request sent for each match is + // set to `null`. + // If it is `=value`, the routing on the bulk request sent for each match is set + // to all value specified after the equals sign (`=`). Routing *string `json:"routing,omitempty"` // VersionType The versioning to use for the indexing operation. VersionType *versiontype.VersionType `json:"version_type,omitempty"` @@ -111,3 +117,13 @@ func NewReindexDestination() *ReindexDestination { return r } + +// true + +type ReindexDestinationVariant interface { + ReindexDestinationCaster() *ReindexDestination +} + +func (s *ReindexDestination) ReindexDestinationCaster() *ReindexDestination { + return s +} diff --git a/typedapi/types/reindexnode.go b/typedapi/types/reindexnode.go index 9156c4d290..011e980042 100644 --- a/typedapi/types/reindexnode.go +++ b/typedapi/types/reindexnode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // ReindexNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/reindex_rethrottle/types.ts#L33-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/reindex_rethrottle/types.ts#L33-L35 type ReindexNode struct { Attributes map[string]string `json:"attributes"` Host string `json:"host"` @@ -107,9 +107,11 @@ func (s *ReindexNode) UnmarshalJSON(data []byte) error { // NewReindexNode returns a ReindexNode. func NewReindexNode() *ReindexNode { r := &ReindexNode{ - Attributes: make(map[string]string, 0), - Tasks: make(map[string]ReindexTask, 0), + Attributes: make(map[string]string), + Tasks: make(map[string]ReindexTask), } return r } + +// false diff --git a/typedapi/types/reindexsource.go b/typedapi/types/reindexsource.go index ed76a2263e..696ab27250 100644 --- a/typedapi/types/reindexsource.go +++ b/typedapi/types/reindexsource.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,26 +31,37 @@ import ( // ReindexSource type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/reindex/types.ts#L66-L97 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/reindex/types.ts#L69-L110 type ReindexSource struct { // Index The name of the data stream, index, or alias you are copying from. - // Accepts a comma-separated list to reindex from multiple sources. + // It accepts a comma-separated list to reindex from multiple sources. 
Index []string `json:"index"` - // Query Specifies the documents to reindex using the Query DSL. + // Query The documents to reindex, which is defined with Query DSL. Query *Query `json:"query,omitempty"` // Remote A remote instance of Elasticsearch that you want to index from. Remote *RemoteSource `json:"remote,omitempty"` RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"` // Size The number of documents to index per batch. - // Use when indexing from remote to ensure that the batches fit within the - // on-heap buffer, which defaults to a maximum size of 100 MB. + // Use it when you are indexing from remote to ensure that the batches fit + // within the on-heap buffer, which defaults to a maximum size of 100 MB. Size *int `json:"size,omitempty"` // Slice Slice the reindex request manually using the provided slice ID and total // number of slices. - Slice *SlicedScroll `json:"slice,omitempty"` - Sort []SortCombinations `json:"sort,omitempty"` - // SourceFields_ If `true` reindexes all source fields. - // Set to a list to reindex select fields. + Slice *SlicedScroll `json:"slice,omitempty"` + // Sort A comma-separated list of `:` pairs to sort by before + // indexing. + // Use it in conjunction with `max_docs` to control what documents are + // reindexed. + // + // WARNING: Sort in reindex is deprecated. + // Sorting in reindex was never guaranteed to index documents in order and + // prevents further development of reindex such as resilience and performance + // improvements. + // If used in combination with `max_docs`, consider using a query filter + // instead. + Sort []SortCombinations `json:"sort,omitempty"` + // SourceFields_ If `true`, reindex all source fields. + // Set it to a list to reindex select fields. 
SourceFields_ []string `json:"_source,omitempty"` } @@ -164,3 +175,13 @@ func NewReindexSource() *ReindexSource { return r } + +// true + +type ReindexSourceVariant interface { + ReindexSourceCaster() *ReindexSource +} + +func (s *ReindexSource) ReindexSourceCaster() *ReindexSource { + return s +} diff --git a/typedapi/types/reindexstatus.go b/typedapi/types/reindexstatus.go index 9413686e18..e121f2f130 100644 --- a/typedapi/types/reindexstatus.go +++ b/typedapi/types/reindexstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ReindexStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/reindex_rethrottle/types.ts#L37-L85 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/reindex_rethrottle/types.ts#L37-L85 type ReindexStatus struct { // Batches The number of scroll responses pulled back by the reindex. Batches int64 `json:"batches"` @@ -237,3 +237,5 @@ func NewReindexStatus() *ReindexStatus { return r } + +// false diff --git a/typedapi/types/reindextask.go b/typedapi/types/reindextask.go index 4bf44eb6c2..3ee0ac6469 100644 --- a/typedapi/types/reindextask.go +++ b/typedapi/types/reindextask.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ReindexTask type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/reindex_rethrottle/types.ts#L87-L98 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/reindex_rethrottle/types.ts#L87-L98 type ReindexTask struct { Action string `json:"action"` Cancellable bool `json:"cancellable"` @@ -161,3 +161,5 @@ func NewReindexTask() *ReindexTask { return r } + +// false diff --git a/typedapi/types/reloaddetails.go b/typedapi/types/reloaddetails.go index fb881978bf..ab37343383 100644 --- a/typedapi/types/reloaddetails.go +++ b/typedapi/types/reloaddetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ReloadDetails type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/reload_search_analyzers/types.ts#L27-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/reload_search_analyzers/types.ts#L27-L31 type ReloadDetails struct { Index string `json:"index"` ReloadedAnalyzers []string `json:"reloaded_analyzers"` @@ -86,3 +86,5 @@ func NewReloadDetails() *ReloadDetails { return r } + +// false diff --git a/typedapi/types/reloadresult.go b/typedapi/types/reloadresult.go index 8fd285c374..200ec9925b 100644 --- a/typedapi/types/reloadresult.go +++ b/typedapi/types/reloadresult.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ReloadResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/reload_search_analyzers/types.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/reload_search_analyzers/types.ts#L22-L25 type ReloadResult struct { ReloadDetails []ReloadDetails `json:"reload_details"` Shards_ ShardStatistics `json:"_shards"` @@ -34,3 +34,5 @@ func NewReloadResult() *ReloadResult { return r } + +// false diff --git a/typedapi/types/relocationfailureinfo.go b/typedapi/types/relocationfailureinfo.go index b18666fdd3..109164b089 100644 --- a/typedapi/types/relocationfailureinfo.go +++ b/typedapi/types/relocationfailureinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RelocationFailureInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Node.ts#L73-L75 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Node.ts#L67-L69 type RelocationFailureInfo struct { FailedAttempts int `json:"failed_attempts"` } @@ -78,3 +78,5 @@ func NewRelocationFailureInfo() *RelocationFailureInfo { return r } + +// false diff --git a/typedapi/types/remoteclusterprivileges.go b/typedapi/types/remoteclusterprivileges.go new file mode 100644 index 0000000000..c00b4eb1f0 --- /dev/null +++ b/typedapi/types/remoteclusterprivileges.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/remoteclusterprivilege" +) + +// RemoteClusterPrivileges type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L278-L290 +type RemoteClusterPrivileges struct { + // Clusters A list of cluster aliases to which the permissions in this entry apply. + Clusters []string `json:"clusters"` + // Privileges The cluster level privileges that owners of the role have on the remote + // cluster. + Privileges []remoteclusterprivilege.RemoteClusterPrivilege `json:"privileges"` +} + +func (s *RemoteClusterPrivileges) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "clusters": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + + s.Clusters = append(s.Clusters, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Clusters); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + } + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return fmt.Errorf("%s | %w", "Privileges", err) + } + + } + } + return nil +} + +// NewRemoteClusterPrivileges returns a RemoteClusterPrivileges. 
+func NewRemoteClusterPrivileges() *RemoteClusterPrivileges { + r := &RemoteClusterPrivileges{} + + return r +} + +// true + +type RemoteClusterPrivilegesVariant interface { + RemoteClusterPrivilegesCaster() *RemoteClusterPrivileges +} + +func (s *RemoteClusterPrivileges) RemoteClusterPrivilegesCaster() *RemoteClusterPrivileges { + return s +} diff --git a/typedapi/types/remoteindicesprivileges.go b/typedapi/types/remoteindicesprivileges.go new file mode 100644 index 0000000000..c2bdd9b7a2 --- /dev/null +++ b/typedapi/types/remoteindicesprivileges.go @@ -0,0 +1,186 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege" +) + +// RemoteIndicesPrivileges type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L244-L276 +type RemoteIndicesPrivileges struct { + // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that + // cover restricted indices. Implicitly, restricted indices have limited + // privileges that can cause pattern tests to fail. If restricted indices are + // explicitly included in the `names` list, Elasticsearch checks privileges + // against these indices regardless of the value set for + // `allow_restricted_indices`. + AllowRestrictedIndices *bool `json:"allow_restricted_indices,omitempty"` + // Clusters A list of cluster aliases to which the permissions in this entry apply. + Clusters []string `json:"clusters"` + // FieldSecurity The document fields that the owners of the role have read access to. + FieldSecurity *FieldSecurity `json:"field_security,omitempty"` + // Names A list of indices (or index name patterns) to which the permissions in this + // entry apply. + Names []string `json:"names"` + // Privileges The index level privileges that owners of the role have on the specified + // indices. + Privileges []indexprivilege.IndexPrivilege `json:"privileges"` + // Query A search query that defines the documents the owners of the role have access + // to. A document within the specified indices must match this query for it to + // be accessible by the owners of the role. 
+ Query IndicesPrivilegesQuery `json:"query,omitempty"` +} + +func (s *RemoteIndicesPrivileges) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowRestrictedIndices", err) + } + s.AllowRestrictedIndices = &value + case bool: + s.AllowRestrictedIndices = &v + } + + case "clusters": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + + s.Clusters = append(s.Clusters, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Clusters); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + } + + case "field_security": + if err := dec.Decode(&s.FieldSecurity); err != nil { + return fmt.Errorf("%s | %w", "FieldSecurity", err) + } + + case "names": + if err := dec.Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return fmt.Errorf("%s | %w", "Privileges", err) + } + + case "query": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + query_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Query", err) + } + + switch t { + + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", 
"function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + o := NewQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = o + break query_field + + case "template": + o := NewRoleTemplateQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = o + break query_field + + } + } + if s.Query == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + } + + } + } + return nil +} + +// NewRemoteIndicesPrivileges returns a RemoteIndicesPrivileges. 
+func NewRemoteIndicesPrivileges() *RemoteIndicesPrivileges { + r := &RemoteIndicesPrivileges{} + + return r +} + +// true + +type RemoteIndicesPrivilegesVariant interface { + RemoteIndicesPrivilegesCaster() *RemoteIndicesPrivileges +} + +func (s *RemoteIndicesPrivileges) RemoteIndicesPrivilegesCaster() *RemoteIndicesPrivileges { + return s +} diff --git a/typedapi/types/remotesource.go b/typedapi/types/remotesource.go index 43a29927fd..c80f8dcc0c 100644 --- a/typedapi/types/remotesource.go +++ b/typedapi/types/remotesource.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,18 +30,18 @@ import ( // RemoteSource type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/reindex/types.ts#L99-L125 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/reindex/types.ts#L112-L140 type RemoteSource struct { // ConnectTimeout The remote connection timeout. - // Defaults to 30 seconds. ConnectTimeout Duration `json:"connect_timeout,omitempty"` // Headers An object containing the headers of the request. Headers map[string]string `json:"headers,omitempty"` // Host The URL for the remote instance of Elasticsearch that you want to index from. + // This information is required when you're indexing from remote. Host string `json:"host"` // Password The password to use for authentication with the remote host. Password *string `json:"password,omitempty"` - // SocketTimeout The remote socket read timeout. Defaults to 30 seconds. + // SocketTimeout The remote socket read timeout. 
SocketTimeout Duration `json:"socket_timeout,omitempty"` // Username The username to use for authentication with the remote host. Username *string `json:"username,omitempty"` @@ -103,8 +103,18 @@ func (s *RemoteSource) UnmarshalJSON(data []byte) error { // NewRemoteSource returns a RemoteSource. func NewRemoteSource() *RemoteSource { r := &RemoteSource{ - Headers: make(map[string]string, 0), + Headers: make(map[string]string), } return r } + +// true + +type RemoteSourceVariant interface { + RemoteSourceCaster() *RemoteSource +} + +func (s *RemoteSource) RemoteSourceCaster() *RemoteSource { + return s +} diff --git a/typedapi/types/remoteuserindicesprivileges.go b/typedapi/types/remoteuserindicesprivileges.go new file mode 100644 index 0000000000..e6dbba2f35 --- /dev/null +++ b/typedapi/types/remoteuserindicesprivileges.go @@ -0,0 +1,180 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege" +) + +// RemoteUserIndicesPrivileges type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L316-L339 +type RemoteUserIndicesPrivileges struct { + // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that + // cover restricted indices. Implicitly, restricted indices have limited + // privileges that can cause pattern tests to fail. If restricted indices are + // explicitly included in the `names` list, Elasticsearch checks privileges + // against these indices regardless of the value set for + // `allow_restricted_indices`. + AllowRestrictedIndices bool `json:"allow_restricted_indices"` + Clusters []string `json:"clusters"` + // FieldSecurity The document fields that the owners of the role have read access to. + FieldSecurity []FieldSecurity `json:"field_security,omitempty"` + // Names A list of indices (or index name patterns) to which the permissions in this + // entry apply. + Names []string `json:"names"` + // Privileges The index level privileges that owners of the role have on the specified + // indices. + Privileges []indexprivilege.IndexPrivilege `json:"privileges"` + // Query Search queries that define the documents the user has access to. A document + // within the specified indices must match these queries for it to be accessible + // by the owners of the role. 
+ Query []IndicesPrivilegesQuery `json:"query,omitempty"` +} + +func (s *RemoteUserIndicesPrivileges) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowRestrictedIndices", err) + } + s.AllowRestrictedIndices = value + case bool: + s.AllowRestrictedIndices = v + } + + case "clusters": + if err := dec.Decode(&s.Clusters); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + + case "field_security": + if err := dec.Decode(&s.FieldSecurity); err != nil { + return fmt.Errorf("%s | %w", "FieldSecurity", err) + } + + case "names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + } + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return fmt.Errorf("%s | %w", "Privileges", err) + } + + case "query": + messageArray := []json.RawMessage{} + if err := dec.Decode(&messageArray); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + query_field: + for _, message := range messageArray { + keyDec := json.NewDecoder(bytes.NewReader(message)) + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Query", err) + } + + switch t { + + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", 
"dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + o := NewQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = append(s.Query, o) + continue query_field + + case "template": + o := NewRoleTemplateQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = append(s.Query, o) + continue query_field + + } + } + + var o any + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = append(s.Query, o) + } + + } + } + return nil +} + +// NewRemoteUserIndicesPrivileges returns a RemoteUserIndicesPrivileges. +func NewRemoteUserIndicesPrivileges() *RemoteUserIndicesPrivileges { + r := &RemoteUserIndicesPrivileges{} + + return r +} + +// false diff --git a/typedapi/types/removeaction.go b/typedapi/types/removeaction.go index da67d0e2b0..a70263d10a 100644 --- a/typedapi/types/removeaction.go +++ b/typedapi/types/removeaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RemoveAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/update_aliases/types.ts#L97-L122 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/update_aliases/types.ts#L97-L122 type RemoveAction struct { // Alias Alias for the action. // Index alias names support date math. @@ -131,3 +131,13 @@ func NewRemoveAction() *RemoveAction { return r } + +// true + +type RemoveActionVariant interface { + RemoveActionCaster() *RemoveAction +} + +func (s *RemoveAction) RemoveActionCaster() *RemoveAction { + return s +} diff --git a/typedapi/types/removeduplicatestokenfilter.go b/typedapi/types/removeduplicatestokenfilter.go index a5d3f0fb61..16656f634b 100644 --- a/typedapi/types/removeduplicatestokenfilter.go +++ b/typedapi/types/removeduplicatestokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // RemoveDuplicatesTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L303-L305 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L302-L304 type RemoveDuplicatesTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewRemoveDuplicatesTokenFilter() *RemoveDuplicatesTokenFilter { return r } + +// true + +type RemoveDuplicatesTokenFilterVariant interface { + RemoveDuplicatesTokenFilterCaster() *RemoveDuplicatesTokenFilter +} + +func (s *RemoveDuplicatesTokenFilter) RemoveDuplicatesTokenFilterCaster() *RemoveDuplicatesTokenFilter { + return s +} diff --git a/typedapi/types/removeindexaction.go b/typedapi/types/removeindexaction.go index 6c7f045cc3..6c172d7bca 100644 --- a/typedapi/types/removeindexaction.go +++ b/typedapi/types/removeindexaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RemoveIndexAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/update_aliases/types.ts#L124-L139 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/update_aliases/types.ts#L124-L139 type RemoveIndexAction struct { // Index Data stream or index for the action. // Supports wildcards (`*`). 
@@ -104,3 +104,13 @@ func NewRemoveIndexAction() *RemoveIndexAction { return r } + +// true + +type RemoveIndexActionVariant interface { + RemoveIndexActionCaster() *RemoveIndexAction +} + +func (s *RemoveIndexAction) RemoveIndexActionCaster() *RemoveIndexAction { + return s +} diff --git a/typedapi/types/removeprocessor.go b/typedapi/types/removeprocessor.go index 7e6c4f7c7c..b6ffad7f45 100644 --- a/typedapi/types/removeprocessor.go +++ b/typedapi/types/removeprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RemoveProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L941-L955 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1340-L1354 type RemoveProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -182,3 +182,13 @@ func NewRemoveProcessor() *RemoveProcessor { return r } + +// true + +type RemoveProcessorVariant interface { + RemoveProcessorCaster() *RemoveProcessor +} + +func (s *RemoveProcessor) RemoveProcessorCaster() *RemoveProcessor { + return s +} diff --git a/typedapi/types/renameprocessor.go b/typedapi/types/renameprocessor.go index 80afe39241..a9362f0e0b 100644 --- a/typedapi/types/renameprocessor.go +++ b/typedapi/types/renameprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RenameProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L957-L973 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1356-L1372 type RenameProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -161,3 +161,13 @@ func NewRenameProcessor() *RenameProcessor { return r } + +// true + +type RenameProcessorVariant interface { + RenameProcessorCaster() *RenameProcessor +} + +func (s *RenameProcessor) RenameProcessorCaster() *RenameProcessor { + return s +} diff --git a/typedapi/types/replicationaccess.go b/typedapi/types/replicationaccess.go new file mode 100644 index 0000000000..fadafc5899 --- /dev/null +++ b/typedapi/types/replicationaccess.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReplicationAccess type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L442-L452 +type ReplicationAccess struct { + // AllowRestrictedIndices This needs to be set to true if the patterns in the names field should cover + // system indices. + AllowRestrictedIndices *bool `json:"allow_restricted_indices,omitempty"` + // Names A list of indices (or index name patterns) to which the permissions in this + // entry apply. + Names []string `json:"names"` +} + +func (s *ReplicationAccess) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowRestrictedIndices", err) + } + s.AllowRestrictedIndices = &value + case bool: + s.AllowRestrictedIndices = &v + } + + case "names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + } + + } + } + return nil +} + +// 
NewReplicationAccess returns a ReplicationAccess. +func NewReplicationAccess() *ReplicationAccess { + r := &ReplicationAccess{} + + return r +} + +// true + +type ReplicationAccessVariant interface { + ReplicationAccessCaster() *ReplicationAccess +} + +func (s *ReplicationAccess) ReplicationAccessCaster() *ReplicationAccess { + return s +} diff --git a/typedapi/types/reportingemailattachment.go b/typedapi/types/reportingemailattachment.go index ef6371af13..bf6c1aefdb 100644 --- a/typedapi/types/reportingemailattachment.go +++ b/typedapi/types/reportingemailattachment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ReportingEmailAttachment type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L224-L232 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L224-L232 type ReportingEmailAttachment struct { Inline *bool `json:"inline,omitempty"` Interval Duration `json:"interval,omitempty"` @@ -118,3 +118,13 @@ func NewReportingEmailAttachment() *ReportingEmailAttachment { return r } + +// true + +type ReportingEmailAttachmentVariant interface { + ReportingEmailAttachmentCaster() *ReportingEmailAttachment +} + +func (s *ReportingEmailAttachment) ReportingEmailAttachmentCaster() *ReportingEmailAttachment { + return s +} diff --git a/typedapi/types/repositoriesrecord.go b/typedapi/types/repositoriesrecord.go index fd7b8bde55..01665e36de 100644 --- a/typedapi/types/repositoriesrecord.go +++ b/typedapi/types/repositoriesrecord.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RepositoriesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/repositories/types.ts#L20-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/repositories/types.ts#L20-L31 type RepositoriesRecord struct { // Id The unique repository identifier. Id *string `json:"id,omitempty"` @@ -89,3 +89,5 @@ func NewRepositoriesRecord() *RepositoriesRecord { return r } + +// false diff --git a/typedapi/types/repository.go b/typedapi/types/repository.go index 61db7a030d..113ef93c5f 100644 --- a/typedapi/types/repository.go +++ b/typedapi/types/repository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,5 +29,9 @@ package types // ReadOnlyUrlRepository // SourceOnlyRepository // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotRepository.ts#L24-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotRepository.ts#L24-L34 type Repository any + +type RepositoryVariant interface { + RepositoryCaster() *Repository +} diff --git a/typedapi/types/repositoryintegrityindicator.go b/typedapi/types/repositoryintegrityindicator.go index 5282f9d4df..4e2cf5ea9c 100644 --- a/typedapi/types/repositoryintegrityindicator.go +++ b/typedapi/types/repositoryintegrityindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // RepositoryIntegrityIndicator type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L135-L139 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L136-L140 type RepositoryIntegrityIndicator struct { Details *RepositoryIntegrityIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewRepositoryIntegrityIndicator() *RepositoryIntegrityIndicator { return r } + +// false diff --git a/typedapi/types/repositoryintegrityindicatordetails.go b/typedapi/types/repositoryintegrityindicatordetails.go index c2afa54390..88809a15d2 100644 --- a/typedapi/types/repositoryintegrityindicatordetails.go +++ b/typedapi/types/repositoryintegrityindicatordetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RepositoryIntegrityIndicatorDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L140-L144 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L141-L145 type RepositoryIntegrityIndicatorDetails struct { Corrupted []string `json:"corrupted,omitempty"` CorruptedRepositories *int64 `json:"corrupted_repositories,omitempty"` @@ -99,3 +99,5 @@ func NewRepositoryIntegrityIndicatorDetails() *RepositoryIntegrityIndicatorDetai return r } + +// false diff --git a/typedapi/types/repositorylocation.go b/typedapi/types/repositorylocation.go index c6e8e076ec..216fbada79 100644 --- a/typedapi/types/repositorylocation.go +++ b/typedapi/types/repositorylocation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RepositoryLocation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/RepositoryMeteringInformation.ts#L68-L74 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/RepositoryMeteringInformation.ts#L68-L74 type RepositoryLocation struct { BasePath string `json:"base_path"` // Bucket Bucket name (GCP, S3) @@ -102,3 +102,5 @@ func NewRepositoryLocation() *RepositoryLocation { return r } + +// false diff --git a/typedapi/types/repositorymeteringinformation.go b/typedapi/types/repositorymeteringinformation.go index 0ee9b82719..6a98aaf3ff 100644 --- a/typedapi/types/repositorymeteringinformation.go +++ b/typedapi/types/repositorymeteringinformation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RepositoryMeteringInformation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/RepositoryMeteringInformation.ts#L24-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/RepositoryMeteringInformation.ts#L24-L66 type RepositoryMeteringInformation struct { // Archived A flag that tells whether or not this object has been archived. 
When a // repository is closed or updated the @@ -154,3 +154,5 @@ func NewRepositoryMeteringInformation() *RepositoryMeteringInformation { return r } + +// false diff --git a/typedapi/types/requestcachestats.go b/typedapi/types/requestcachestats.go index 42b1d140dc..9446beba6d 100644 --- a/typedapi/types/requestcachestats.go +++ b/typedapi/types/requestcachestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RequestCacheStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L244-L250 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L247-L253 type RequestCacheStats struct { Evictions int64 `json:"evictions"` HitCount int64 `json:"hit_count"` @@ -138,3 +138,5 @@ func NewRequestCacheStats() *RequestCacheStats { return r } + +// false diff --git a/typedapi/types/requestcounts.go b/typedapi/types/requestcounts.go index 4a5c471656..3b2de60a25 100644 --- a/typedapi/types/requestcounts.go +++ b/typedapi/types/requestcounts.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RequestCounts type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/RepositoryMeteringInformation.ts#L76-L103 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/RepositoryMeteringInformation.ts#L76-L103 type RequestCounts struct { // GetBlob Number of Get Blob requests (Azure) GetBlob *int64 `json:"GetBlob,omitempty"` @@ -253,3 +253,5 @@ func NewRequestCounts() *RequestCounts { return r } + +// false diff --git a/typedapi/types/requestitem.go b/typedapi/types/requestitem.go index 25661b4968..879f566f6a 100644 --- a/typedapi/types/requestitem.go +++ b/typedapi/types/requestitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // MultisearchHeader // TemplateConfig // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/msearch_template/types.ts#L25-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/msearch_template/types.ts#L25-L26 type RequestItem any + +type RequestItemVariant interface { + RequestItemCaster() *RequestItem +} diff --git a/typedapi/types/reroutedecision.go b/typedapi/types/reroutedecision.go index 61b22f7f5d..04f5b7af37 100644 --- a/typedapi/types/reroutedecision.go +++ b/typedapi/types/reroutedecision.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RerouteDecision type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/reroute/types.ts#L86-L90 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/reroute/types.ts#L86-L90 type RerouteDecision struct { Decider string `json:"decider"` Decision string `json:"decision"` @@ -100,3 +100,5 @@ func NewRerouteDecision() *RerouteDecision { return r } + +// false diff --git a/typedapi/types/rerouteexplanation.go b/typedapi/types/rerouteexplanation.go index fbb0752c08..18402f8426 100644 --- a/typedapi/types/rerouteexplanation.go +++ b/typedapi/types/rerouteexplanation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RerouteExplanation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/reroute/types.ts#L92-L96 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/reroute/types.ts#L92-L96 type RerouteExplanation struct { Command string `json:"command"` Decisions []RerouteDecision `json:"decisions"` @@ -86,3 +86,5 @@ func NewRerouteExplanation() *RerouteExplanation { return r } + +// false diff --git a/typedapi/types/rerouteparameters.go b/typedapi/types/rerouteparameters.go index b3bb346a20..c73dc2612b 100644 --- a/typedapi/types/rerouteparameters.go +++ b/typedapi/types/rerouteparameters.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RerouteParameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/reroute/types.ts#L98-L105 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/reroute/types.ts#L98-L105 type RerouteParameters struct { AllowPrimary bool `json:"allow_primary"` FromNode *string `json:"from_node,omitempty"` @@ -117,3 +117,5 @@ func NewRerouteParameters() *RerouteParameters { return r } + +// false diff --git a/typedapi/types/rerouteprocessor.go b/typedapi/types/rerouteprocessor.go index 8eec914358..4a770eb76b 100644 --- a/typedapi/types/rerouteprocessor.go +++ b/typedapi/types/rerouteprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RerouteProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L975-L1003 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1374-L1402 type RerouteProcessor struct { // Dataset Field references or a static value for the dataset part of the data stream // name. @@ -203,3 +203,13 @@ func NewRerouteProcessor() *RerouteProcessor { return r } + +// true + +type RerouteProcessorVariant interface { + RerouteProcessorCaster() *RerouteProcessor +} + +func (s *RerouteProcessor) RerouteProcessorCaster() *RerouteProcessor { + return s +} diff --git a/typedapi/types/rescore.go b/typedapi/types/rescore.go index 3bd62282e2..1f664c9f1f 100644 --- a/typedapi/types/rescore.go +++ b/typedapi/types/rescore.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,11 +31,12 @@ import ( // Rescore type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/rescoring.ts#L25-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/rescoring.ts#L25-L38 type Rescore struct { - LearningToRank *LearningToRank `json:"learning_to_rank,omitempty"` - Query *RescoreQuery `json:"query,omitempty"` - WindowSize *int `json:"window_size,omitempty"` + AdditionalRescoreProperty map[string]json.RawMessage `json:"-"` + LearningToRank *LearningToRank `json:"learning_to_rank,omitempty"` + Query *RescoreQuery `json:"query,omitempty"` + WindowSize *int `json:"window_size,omitempty"` } func (s *Rescore) UnmarshalJSON(data []byte) error { @@ -79,14 +80,68 @@ func (s *Rescore) UnmarshalJSON(data []byte) error { s.WindowSize = &f } + default: + + if key, ok := t.(string); ok { + if s.AdditionalRescoreProperty == nil { + s.AdditionalRescoreProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalRescoreProperty", err) + } + s.AdditionalRescoreProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s Rescore) MarshalJSON() ([]byte, error) { + type opt Rescore + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalRescoreProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalRescoreProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewRescore 
returns a Rescore. func NewRescore() *Rescore { - r := &Rescore{} + r := &Rescore{ + AdditionalRescoreProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type RescoreVariant interface { + RescoreCaster() *Rescore +} + +func (s *Rescore) RescoreCaster() *Rescore { + return s +} diff --git a/typedapi/types/rescorequery.go b/typedapi/types/rescorequery.go index 9d81908e34..84e86ae891 100644 --- a/typedapi/types/rescorequery.go +++ b/typedapi/types/rescorequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // RescoreQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/rescoring.ts#L40-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/rescoring.ts#L40-L62 type RescoreQuery struct { // Query The query to use for rescoring. // This query is only run on the Top-K results returned by the `query` and @@ -115,3 +115,13 @@ func NewRescoreQuery() *RescoreQuery { return r } + +// true + +type RescoreQueryVariant interface { + RescoreQueryCaster() *RescoreQuery +} + +func (s *RescoreQuery) RescoreQueryCaster() *RescoreQuery { + return s +} diff --git a/typedapi/types/hotthread.go b/typedapi/types/rescorevector.go similarity index 51% rename from typedapi/types/hotthread.go rename to typedapi/types/rescorevector.go index c158fb27a0..9eea524e31 100644 --- a/typedapi/types/hotthread.go +++ b/typedapi/types/rescorevector.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,19 +26,18 @@ import ( "errors" "fmt" "io" + "strconv" ) -// HotThread type. +// RescoreVector type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/hot_threads/types.ts#L23-L28 -type HotThread struct { - Hosts []string `json:"hosts"` - NodeId string `json:"node_id"` - NodeName string `json:"node_name"` - Threads []string `json:"threads"` +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Knn.ts#L30-L33 +type RescoreVector struct { + // Oversample Applies the specified oversample factor to k on the approximate kNN search + Oversample float32 `json:"oversample"` } -func (s *HotThread) UnmarshalJSON(data []byte) error { +func (s *RescoreVector) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -53,24 +52,20 @@ func (s *HotThread) UnmarshalJSON(data []byte) error { switch t { - case "hosts": - if err := dec.Decode(&s.Hosts); err != nil { - return fmt.Errorf("%s | %w", "Hosts", err) - } - - case "node_id": - if err := dec.Decode(&s.NodeId); err != nil { - return fmt.Errorf("%s | %w", "NodeId", err) - } - - case "node_name": - if err := dec.Decode(&s.NodeName); err != nil { - return fmt.Errorf("%s | %w", "NodeName", err) - } - - case "threads": - if err := dec.Decode(&s.Threads); err != nil { - return fmt.Errorf("%s | %w", "Threads", err) + case "oversample": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Oversample", err) + } + f := float32(value) + s.Oversample = f + case float64: + f := float32(v) + s.Oversample = f } } @@ -78,9 +73,19 @@ 
func (s *HotThread) UnmarshalJSON(data []byte) error { return nil } -// NewHotThread returns a HotThread. -func NewHotThread() *HotThread { - r := &HotThread{} +// NewRescoreVector returns a RescoreVector. +func NewRescoreVector() *RescoreVector { + r := &RescoreVector{} return r } + +// true + +type RescoreVectorVariant interface { + RescoreVectorCaster() *RescoreVector +} + +func (s *RescoreVector) RescoreVectorCaster() *RescoreVector { + return s +} diff --git a/typedapi/types/reservedsize.go b/typedapi/types/reservedsize.go index ed6d6245f6..19c4c6cdc3 100644 --- a/typedapi/types/reservedsize.go +++ b/typedapi/types/reservedsize.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ReservedSize type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/types.ts#L71-L76 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/types.ts#L72-L77 type ReservedSize struct { NodeId string `json:"node_id"` Path string `json:"path"` @@ -102,3 +102,5 @@ func NewReservedSize() *ReservedSize { return r } + +// false diff --git a/typedapi/types/resolveclusterinfo.go b/typedapi/types/resolveclusterinfo.go index 35711e21b7..c7955def5f 100644 --- a/typedapi/types/resolveclusterinfo.go +++ b/typedapi/types/resolveclusterinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,20 +31,20 @@ import ( // ResolveClusterInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/resolve_cluster/ResolveClusterResponse.ts#L29-L55 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/resolve_cluster/ResolveClusterResponse.ts#L29-L55 type ResolveClusterInfo struct { // Connected Whether the remote cluster is connected to the local (querying) cluster. Connected bool `json:"connected"` // Error Provides error messages that are likely to occur if you do a search with this // index expression - // on the specified cluster (e.g., lack of security privileges to query an - // index). + // on the specified cluster (for example, lack of security privileges to query + // an index). Error *string `json:"error,omitempty"` // MatchingIndices Whether the index expression provided in the request matches any indices, // aliases or data streams // on the cluster. MatchingIndices *bool `json:"matching_indices,omitempty"` - // SkipUnavailable The skip_unavailable setting for a remote cluster. + // SkipUnavailable The `skip_unavailable` setting for a remote cluster. SkipUnavailable bool `json:"skip_unavailable"` // Version Provides version information about the cluster. Version *ElasticsearchVersionMinInfo `json:"version,omitempty"` @@ -135,3 +135,5 @@ func NewResolveClusterInfo() *ResolveClusterInfo { return r } + +// false diff --git a/typedapi/types/resolveindexaliasitem.go b/typedapi/types/resolveindexaliasitem.go index 123a069e72..45033a9e29 100644 --- a/typedapi/types/resolveindexaliasitem.go +++ b/typedapi/types/resolveindexaliasitem.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ResolveIndexAliasItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/resolve_index/ResolveIndexResponse.ts#L37-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/resolve_index/ResolveIndexResponse.ts#L37-L40 type ResolveIndexAliasItem struct { Indices []string `json:"indices"` Name string `json:"name"` @@ -83,3 +83,5 @@ func NewResolveIndexAliasItem() *ResolveIndexAliasItem { return r } + +// false diff --git a/typedapi/types/resolveindexdatastreamsitem.go b/typedapi/types/resolveindexdatastreamsitem.go index 43c617f36d..5bc3b5ceb8 100644 --- a/typedapi/types/resolveindexdatastreamsitem.go +++ b/typedapi/types/resolveindexdatastreamsitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ResolveIndexDataStreamsItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/resolve_index/ResolveIndexResponse.ts#L42-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/resolve_index/ResolveIndexResponse.ts#L42-L46 type ResolveIndexDataStreamsItem struct { BackingIndices []string `json:"backing_indices"` Name string `json:"name"` @@ -89,3 +89,5 @@ func NewResolveIndexDataStreamsItem() *ResolveIndexDataStreamsItem { return r } + +// false diff --git a/typedapi/types/resolveindexitem.go b/typedapi/types/resolveindexitem.go index 6db3662ab6..b270d9958e 100644 --- a/typedapi/types/resolveindexitem.go +++ b/typedapi/types/resolveindexitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ResolveIndexItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/resolve_index/ResolveIndexResponse.ts#L30-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/resolve_index/ResolveIndexResponse.ts#L30-L35 type ResolveIndexItem struct { Aliases []string `json:"aliases,omitempty"` Attributes []string `json:"attributes"` @@ -84,3 +84,5 @@ func NewResolveIndexItem() *ResolveIndexItem { return r } + +// false diff --git a/typedapi/types/resourceprivileges.go b/typedapi/types/resourceprivileges.go index c608d8ff2e..a4fabf7d02 100644 --- a/typedapi/types/resourceprivileges.go +++ b/typedapi/types/resourceprivileges.go @@ -16,11 +16,11 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ResourcePrivileges type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/has_privileges/types.ts#L47-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/has_privileges/types.ts#L48-L48 type ResourcePrivileges map[string]Privileges diff --git a/typedapi/types/responsebody.go b/typedapi/types/responsebody.go deleted file mode 100644 index 212baacca8..0000000000 --- a/typedapi/types/responsebody.go +++ /dev/null @@ -1,782 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - "strings" -) - -// ResponseBody type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/SearchResponse.ts#L38-L54 -type ResponseBody struct { - Aggregations map[string]Aggregate `json:"aggregations,omitempty"` - Clusters_ *ClusterStatistics `json:"_clusters,omitempty"` - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Hits HitsMetadata `json:"hits"` - MaxScore *Float64 `json:"max_score,omitempty"` - NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` - PitId *string `json:"pit_id,omitempty"` - Profile *Profile `json:"profile,omitempty"` - ScrollId_ *string `json:"_scroll_id,omitempty"` - Shards_ ShardStatistics `json:"_shards"` - Suggest map[string][]Suggest `json:"suggest,omitempty"` - TerminatedEarly *bool `json:"terminated_early,omitempty"` - TimedOut bool `json:"timed_out"` - Took int64 `json:"took"` -} - -func (s *ResponseBody) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "aggregations": - if s.Aggregations == nil { - s.Aggregations = make(map[string]Aggregate, 0) - } - - for dec.More() { - tt, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - if s.Aggregations == nil { - s.Aggregations = make(map[string]Aggregate, 0) - } - switch elems[0] { - - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - 
s.Aggregations[elems[1]] = o - - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "min": - o := NewMinAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "max": - o := NewMaxAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "sum": - o := NewSumAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := 
dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case 
"histogram": - o := NewHistogramAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", 
"Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "parent": - o := NewParentAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", 
"Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "range": - o := NewRangeAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(&o); err != nil { - 
return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "frequent_item_sets": - o := NewFrequentItemSetsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "boxplot": - o := NewBoxPlotAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "rate": - o := NewRateAggregate() - if err := 
dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - - default: - o := make(map[string]any, 0) - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]any, 0) - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Aggregations", err) - } - s.Aggregations[value] = o - } - } - } - - case "_clusters": - if err := dec.Decode(&s.Clusters_); err != nil { - return fmt.Errorf("%s | %w", "Clusters_", err) - } - - case "fields": - if s.Fields == nil { - s.Fields = make(map[string]json.RawMessage, 0) - } - if err := dec.Decode(&s.Fields); err != nil { - return fmt.Errorf("%s | %w", "Fields", err) - } - - case "hits": - if err := dec.Decode(&s.Hits); err != nil { - return fmt.Errorf("%s | %w", "Hits", err) - } - - case "max_score": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("%s | %w", "MaxScore", err) - } - f := Float64(value) - s.MaxScore = &f - case float64: - f := Float64(v) - s.MaxScore = &f - } - - case "num_reduce_phases": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) 
- if err != nil { - return fmt.Errorf("%s | %w", "NumReducePhases", err) - } - s.NumReducePhases = &value - case float64: - f := int64(v) - s.NumReducePhases = &f - } - - case "pit_id": - if err := dec.Decode(&s.PitId); err != nil { - return fmt.Errorf("%s | %w", "PitId", err) - } - - case "profile": - if err := dec.Decode(&s.Profile); err != nil { - return fmt.Errorf("%s | %w", "Profile", err) - } - - case "_scroll_id": - if err := dec.Decode(&s.ScrollId_); err != nil { - return fmt.Errorf("%s | %w", "ScrollId_", err) - } - - case "_shards": - if err := dec.Decode(&s.Shards_); err != nil { - return fmt.Errorf("%s | %w", "Shards_", err) - } - - case "suggest": - if s.Suggest == nil { - s.Suggest = make(map[string][]Suggest, 0) - } - - for dec.More() { - tt, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - if s.Suggest == nil { - s.Suggest = make(map[string][]Suggest, 0) - } - switch elems[0] { - - case "completion": - o := NewCompletionSuggest() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Suggest", err) - } - s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) - - case "phrase": - o := NewPhraseSuggest() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Suggest", err) - } - s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) - - case "term": - o := NewTermSuggest() - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Suggest", err) - } - s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) - - default: - o := make(map[string]any, 0) - if err := dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Suggest", err) - } - s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) - } - } else { - return errors.New("cannot decode JSON for field Suggest") - } - } else { - o := make(map[string]any, 0) - if err := 
dec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Suggest", err) - } - s.Suggest[value] = append(s.Suggest[value], o) - } - } - } - - case "terminated_early": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "TerminatedEarly", err) - } - s.TerminatedEarly = &value - case bool: - s.TerminatedEarly = &v - } - - case "timed_out": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "TimedOut", err) - } - s.TimedOut = value - case bool: - s.TimedOut = v - } - - case "took": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "Took", err) - } - s.Took = value - case float64: - f := int64(v) - s.Took = f - } - - } - } - return nil -} - -// NewResponseBody returns a ResponseBody. -func NewResponseBody() *ResponseBody { - r := &ResponseBody{ - Aggregations: make(map[string]Aggregate, 0), - Fields: make(map[string]json.RawMessage, 0), - Suggest: make(map[string][]Suggest, 0), - } - - return r -} diff --git a/typedapi/types/responseitem.go b/typedapi/types/responseitem.go index c744da5d29..acbca35a46 100644 --- a/typedapi/types/responseitem.go +++ b/typedapi/types/responseitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -27,38 +27,43 @@ import ( "fmt" "io" "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/failurestorestatus" ) // ResponseItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/bulk/types.ts#L37-L81 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/bulk/types.ts#L37-L84 type ResponseItem struct { - // Error Contains additional information about the failed operation. - // The parameter is only returned for failed operations. - Error *ErrorCause `json:"error,omitempty"` - ForcedRefresh *bool `json:"forced_refresh,omitempty"` - Get *InlineGetDictUserDefined `json:"get,omitempty"` + // Error Additional information about the failed operation. + // The property is returned only for failed operations. + Error *ErrorCause `json:"error,omitempty"` + FailureStore *failurestorestatus.FailureStoreStatus `json:"failure_store,omitempty"` + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + Get *InlineGetDictUserDefined `json:"get,omitempty"` // Id_ The document ID associated with the operation. Id_ *string `json:"_id,omitempty"` - // Index_ Name of the index associated with the operation. + // Index_ The name of the index associated with the operation. // If the operation targeted a data stream, this is the backing index into which // the document was written. Index_ string `json:"_index"` // PrimaryTerm_ The primary term assigned to the document for the operation. + // This property is returned only for successful operations. PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` - // Result Result of the operation. + // Result The result of the operation. // Successful values are `created`, `deleted`, and `updated`. Result *string `json:"result,omitempty"` // SeqNo_ The sequence number assigned to the document for the operation. - // Sequence numbers are used to ensure an older version of a document doesn’t + // Sequence numbers are used to ensure an older version of a document doesn't // overwrite a newer version. 
SeqNo_ *int64 `json:"_seq_no,omitempty"` - // Shards_ Contains shard information for the operation. + // Shards_ Shard information for the operation. Shards_ *ShardStatistics `json:"_shards,omitempty"` - // Status HTTP status code returned for the operation. + // Status The HTTP status code returned for the operation. Status int `json:"status"` // Version_ The document version associated with the operation. // The document version is incremented each time the document is updated. + // This property is returned only for successful actions. Version_ *int64 `json:"_version,omitempty"` } @@ -82,6 +87,11 @@ func (s *ResponseItem) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Error", err) } + case "failure_store": + if err := dec.Decode(&s.FailureStore); err != nil { + return fmt.Errorf("%s | %w", "FailureStore", err) + } + case "forced_refresh": var tmp any dec.Decode(&tmp) @@ -194,3 +204,5 @@ func NewResponseItem() *ResponseItem { return r } + +// false diff --git a/typedapi/types/restriction.go b/typedapi/types/restriction.go new file mode 100644 index 0000000000..2d17df7900 --- /dev/null +++ b/typedapi/types/restriction.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/restrictionworkflow" +) + +// Restriction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/RoleDescriptor.ts#L135-L141 +type Restriction struct { + // Workflows A list of workflows to which the API key is restricted. + // NOTE: In order to use a role restriction, an API key must be created with a + // single role descriptor. + Workflows []restrictionworkflow.RestrictionWorkflow `json:"workflows"` +} + +// NewRestriction returns a Restriction. +func NewRestriction() *Restriction { + r := &Restriction{} + + return r +} + +// true + +type RestrictionVariant interface { + RestrictionCaster() *Restriction +} + +func (s *Restriction) RestrictionCaster() *Restriction { + return s +} diff --git a/typedapi/types/retention.go b/typedapi/types/retention.go index 0c156ca371..93bfcc5bcb 100644 --- a/typedapi/types/retention.go +++ b/typedapi/types/retention.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Retention type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/_types/SnapshotLifecycle.ts#L84-L97 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/_types/SnapshotLifecycle.ts#L94-L107 type Retention struct { // ExpireAfter Time period after which a snapshot is considered expired and eligible for // deletion. SLM deletes expired snapshots based on the slm.retention_schedule. @@ -107,3 +107,13 @@ func NewRetention() *Retention { return r } + +// true + +type RetentionVariant interface { + RetentionCaster() *Retention +} + +func (s *Retention) RetentionCaster() *Retention { + return s +} diff --git a/typedapi/types/retentionlease.go b/typedapi/types/retentionlease.go index bd3f38eb01..634c3d01ed 100644 --- a/typedapi/types/retentionlease.go +++ b/typedapi/types/retentionlease.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // RetentionLease type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L65-L67 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L66-L68 type RetentionLease struct { Period Duration `json:"period"` } @@ -66,3 +66,13 @@ func NewRetentionLease() *RetentionLease { return r } + +// true + +type RetentionLeaseVariant interface { + RetentionLeaseCaster() *RetentionLease +} + +func (s *RetentionLease) RetentionLeaseCaster() *RetentionLease { + return s +} diff --git a/typedapi/types/retentionpolicy.go b/typedapi/types/retentionpolicy.go index 6d8aa65776..fec34bf569 100644 --- a/typedapi/types/retentionpolicy.go +++ b/typedapi/types/retentionpolicy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // RetentionPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/_types/Transform.ts#L88-L96 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/_types/Transform.ts#L88-L96 type RetentionPolicy struct { // Field The date field that is used to calculate the age of the document. 
Field string `json:"field"` @@ -76,3 +76,13 @@ func NewRetentionPolicy() *RetentionPolicy { return r } + +// true + +type RetentionPolicyVariant interface { + RetentionPolicyCaster() *RetentionPolicy +} + +func (s *RetentionPolicy) RetentionPolicyCaster() *RetentionPolicy { + return s +} diff --git a/typedapi/types/retentionpolicycontainer.go b/typedapi/types/retentionpolicycontainer.go index 5d3e03d16c..0fe42d3768 100644 --- a/typedapi/types/retentionpolicycontainer.go +++ b/typedapi/types/retentionpolicycontainer.go @@ -16,21 +16,68 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // RetentionPolicyContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/_types/Transform.ts#L80-L86 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/_types/Transform.ts#L80-L86 type RetentionPolicyContainer struct { + AdditionalRetentionPolicyContainerProperty map[string]json.RawMessage `json:"-"` // Time Specifies that the transform uses a time field to set the retention policy. 
Time *RetentionPolicy `json:"time,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s RetentionPolicyContainer) MarshalJSON() ([]byte, error) { + type opt RetentionPolicyContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalRetentionPolicyContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalRetentionPolicyContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewRetentionPolicyContainer returns a RetentionPolicyContainer. func NewRetentionPolicyContainer() *RetentionPolicyContainer { - r := &RetentionPolicyContainer{} + r := &RetentionPolicyContainer{ + AdditionalRetentionPolicyContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type RetentionPolicyContainerVariant interface { + RetentionPolicyContainerCaster() *RetentionPolicyContainer +} + +func (s *RetentionPolicyContainer) RetentionPolicyContainerCaster() *RetentionPolicyContainer { + return s +} diff --git a/typedapi/types/retries.go b/typedapi/types/retries.go index fd635a7b3b..5e0ddf3223 100644 --- a/typedapi/types/retries.go +++ b/typedapi/types/retries.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,9 +31,11 @@ import ( // Retries type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Retries.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Retries.ts#L22-L31 type Retries struct { - Bulk int64 `json:"bulk"` + // Bulk The number of bulk actions retried. + Bulk int64 `json:"bulk"` + // Search The number of search actions retried. Search int64 `json:"search"` } @@ -93,3 +95,5 @@ func NewRetries() *Retries { return r } + +// false diff --git a/typedapi/types/retrievercontainer.go b/typedapi/types/retrievercontainer.go index dd5ad002a4..68f8d42487 100644 --- a/typedapi/types/retrievercontainer.go +++ b/typedapi/types/retrievercontainer.go @@ -16,25 +16,77 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // RetrieverContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Retriever.ts#L26-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Retriever.ts#L28-L42 type RetrieverContainer struct { + AdditionalRetrieverContainerProperty map[string]json.RawMessage `json:"-"` // Knn A retriever that replaces the functionality of a knn search. Knn *KnnRetriever `json:"knn,omitempty"` // Rrf A retriever that produces top documents from reciprocal rank fusion (RRF). Rrf *RRFRetriever `json:"rrf,omitempty"` + // Rule A retriever that replaces the functionality of a rule query. 
+ Rule *RuleRetriever `json:"rule,omitempty"` // Standard A retriever that replaces the functionality of a traditional query. Standard *StandardRetriever `json:"standard,omitempty"` + // TextSimilarityReranker A retriever that reranks the top documents based on a reranking model using + // the InferenceAPI + TextSimilarityReranker *TextSimilarityReranker `json:"text_similarity_reranker,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s RetrieverContainer) MarshalJSON() ([]byte, error) { + type opt RetrieverContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalRetrieverContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalRetrieverContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewRetrieverContainer returns a RetrieverContainer. func NewRetrieverContainer() *RetrieverContainer { - r := &RetrieverContainer{} + r := &RetrieverContainer{ + AdditionalRetrieverContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type RetrieverContainerVariant interface { + RetrieverContainerCaster() *RetrieverContainer +} + +func (s *RetrieverContainer) RetrieverContainerCaster() *RetrieverContainer { + return s +} diff --git a/typedapi/types/reversenestedaggregate.go b/typedapi/types/reversenestedaggregate.go index d9d5f95447..45e2c0660f 100644 --- a/typedapi/types/reversenestedaggregate.go +++ b/typedapi/types/reversenestedaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // ReverseNestedAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L493-L494 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L540-L544 type ReverseNestedAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -491,6 +491,13 @@ func (s *ReverseNestedAggregate) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -624,8 +631,10 @@ func (s ReverseNestedAggregate) MarshalJSON() ([]byte, error) { // NewReverseNestedAggregate returns a ReverseNestedAggregate. func NewReverseNestedAggregate() *ReverseNestedAggregate { r := &ReverseNestedAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/reversenestedaggregation.go b/typedapi/types/reversenestedaggregation.go index 6a47d4c1b8..056d37c0ee 100644 --- a/typedapi/types/reversenestedaggregation.go +++ b/typedapi/types/reversenestedaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ReverseNestedAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L721-L727 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L741-L747 type ReverseNestedAggregation struct { // Path Defines the nested object field that should be joined back to. // The default is empty, which means that it joins back to the root/main @@ -69,3 +69,13 @@ func NewReverseNestedAggregation() *ReverseNestedAggregation { return r } + +// true + +type ReverseNestedAggregationVariant interface { + ReverseNestedAggregationCaster() *ReverseNestedAggregation +} + +func (s *ReverseNestedAggregation) ReverseNestedAggregationCaster() *ReverseNestedAggregation { + return s +} diff --git a/typedapi/types/reversetokenfilter.go b/typedapi/types/reversetokenfilter.go index 0e629d5cd8..302712f64e 100644 --- a/typedapi/types/reversetokenfilter.go +++ b/typedapi/types/reversetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ReverseTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L307-L309 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L306-L308 type ReverseTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewReverseTokenFilter() *ReverseTokenFilter { return r } + +// true + +type ReverseTokenFilterVariant interface { + ReverseTokenFilterCaster() *ReverseTokenFilter +} + +func (s *ReverseTokenFilter) ReverseTokenFilterCaster() *ReverseTokenFilter { + return s +} diff --git a/typedapi/types/role.go b/typedapi/types/role.go index ffa568fafd..67c4945b59 100644 --- a/typedapi/types/role.go +++ b/typedapi/types/role.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,19 +26,25 @@ import ( "errors" "fmt" "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterprivilege" ) // Role type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_role/types.ts#L29-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_role/types.ts#L32-L54 type Role struct { Applications []ApplicationPrivileges `json:"applications"` - Cluster []string `json:"cluster"` + Cluster []clusterprivilege.ClusterPrivilege `json:"cluster"` + Description *string `json:"description,omitempty"` Global map[string]map[string]map[string][]string `json:"global,omitempty"` Indices []IndicesPrivileges `json:"indices"` Metadata Metadata `json:"metadata"` + RemoteCluster []RemoteClusterPrivileges `json:"remote_cluster,omitempty"` + RemoteIndices []RemoteIndicesPrivileges `json:"remote_indices,omitempty"` RoleTemplates []RoleTemplate `json:"role_templates,omitempty"` - RunAs []string `json:"run_as"` + RunAs []string `json:"run_as,omitempty"` TransientMetadata map[string]json.RawMessage `json:"transient_metadata,omitempty"` } @@ -67,6 +73,18 @@ func (s *Role) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Cluster", err) } + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + case "global": if s.Global == nil { s.Global = make(map[string]map[string]map[string][]string, 0) @@ -85,6 +103,16 @@ func (s *Role) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Metadata", err) } + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + + case "remote_indices": + if err := dec.Decode(&s.RemoteIndices); err != nil { + return fmt.Errorf("%s | %w", "RemoteIndices", err) + } + case "role_templates": if err := 
dec.Decode(&s.RoleTemplates); err != nil { return fmt.Errorf("%s | %w", "RoleTemplates", err) @@ -111,9 +139,11 @@ func (s *Role) UnmarshalJSON(data []byte) error { // NewRole returns a Role. func NewRole() *Role { r := &Role{ - Global: make(map[string]map[string]map[string][]string, 0), - TransientMetadata: make(map[string]json.RawMessage, 0), + Global: make(map[string]map[string]map[string][]string), + TransientMetadata: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/roledescriptor.go b/typedapi/types/roledescriptor.go index 1f872ac923..de0f9bc3a2 100644 --- a/typedapi/types/roledescriptor.go +++ b/typedapi/types/roledescriptor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // RoleDescriptor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/RoleDescriptor.ts#L28-L61 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/RoleDescriptor.ts#L33-L83 type RoleDescriptor struct { // Applications A list of application privilege entries Applications []ApplicationPrivileges `json:"applications,omitempty"` @@ -51,9 +51,17 @@ type RoleDescriptor struct { // Metadata Optional meta-data. Within the metadata object, keys that begin with `_` are // reserved for system usage. Metadata Metadata `json:"metadata,omitempty"` - // RunAs A list of users that the API keys can impersonate. *Note*: in Serverless, the - // run-as feature is disabled. For API compatibility, you can still specify an - // empty `run_as` field, but a non-empty list will be rejected. 
+ // RemoteCluster A list of cluster permissions for remote clusters. + // NOTE: This is limited a subset of the cluster permissions. + RemoteCluster []RemoteClusterPrivileges `json:"remote_cluster,omitempty"` + // RemoteIndices A list of indices permissions for remote clusters. + RemoteIndices []RemoteIndicesPrivileges `json:"remote_indices,omitempty"` + // Restriction Restriction for when the role descriptor is allowed to be effective. + Restriction *Restriction `json:"restriction,omitempty"` + // RunAs A list of users that the API keys can impersonate. + // NOTE: In Elastic Cloud Serverless, the run-as feature is disabled. + // For API compatibility, you can still specify an empty `run_as` field, but a + // non-empty list will be rejected. RunAs []string `json:"run_as,omitempty"` TransientMetadata map[string]json.RawMessage `json:"transient_metadata,omitempty"` } @@ -121,6 +129,21 @@ func (s *RoleDescriptor) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Metadata", err) } + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + + case "remote_indices": + if err := dec.Decode(&s.RemoteIndices); err != nil { + return fmt.Errorf("%s | %w", "RemoteIndices", err) + } + + case "restriction": + if err := dec.Decode(&s.Restriction); err != nil { + return fmt.Errorf("%s | %w", "Restriction", err) + } + case "run_as": if err := dec.Decode(&s.RunAs); err != nil { return fmt.Errorf("%s | %w", "RunAs", err) @@ -142,8 +165,18 @@ func (s *RoleDescriptor) UnmarshalJSON(data []byte) error { // NewRoleDescriptor returns a RoleDescriptor. 
func NewRoleDescriptor() *RoleDescriptor { r := &RoleDescriptor{ - TransientMetadata: make(map[string]json.RawMessage, 0), + TransientMetadata: make(map[string]json.RawMessage), } return r } + +// true + +type RoleDescriptorVariant interface { + RoleDescriptorCaster() *RoleDescriptor +} + +func (s *RoleDescriptor) RoleDescriptorCaster() *RoleDescriptor { + return s +} diff --git a/typedapi/types/roledescriptorread.go b/typedapi/types/roledescriptorread.go index 5cb50fec8b..661233d1d9 100644 --- a/typedapi/types/roledescriptorread.go +++ b/typedapi/types/roledescriptorread.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,14 +33,14 @@ import ( // RoleDescriptorRead type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/RoleDescriptor.ts#L63-L95 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/RoleDescriptor.ts#L85-L133 type RoleDescriptorRead struct { // Applications A list of application privilege entries Applications []ApplicationPrivileges `json:"applications,omitempty"` // Cluster A list of cluster privileges. These privileges define the cluster level // actions that API keys are able to execute. Cluster []clusterprivilege.ClusterPrivilege `json:"cluster"` - // Description Optional description of the role descriptor + // Description An optional description of the role descriptor. Description *string `json:"description,omitempty"` // Global An object defining global privileges. A global privilege is a form of cluster // privilege that is request-aware. 
Support for global privileges is currently @@ -51,6 +51,13 @@ type RoleDescriptorRead struct { // Metadata Optional meta-data. Within the metadata object, keys that begin with `_` are // reserved for system usage. Metadata Metadata `json:"metadata,omitempty"` + // RemoteCluster A list of cluster permissions for remote clusters. + // NOTE: This is limited a subset of the cluster permissions. + RemoteCluster []RemoteClusterPrivileges `json:"remote_cluster,omitempty"` + // RemoteIndices A list of indices permissions for remote clusters. + RemoteIndices []RemoteIndicesPrivileges `json:"remote_indices,omitempty"` + // Restriction A restriction for when the role descriptor is allowed to be effective. + Restriction *Restriction `json:"restriction,omitempty"` // RunAs A list of users that the API keys can impersonate. RunAs []string `json:"run_as,omitempty"` TransientMetadata map[string]json.RawMessage `json:"transient_metadata,omitempty"` @@ -119,6 +126,21 @@ func (s *RoleDescriptorRead) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Metadata", err) } + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + + case "remote_indices": + if err := dec.Decode(&s.RemoteIndices); err != nil { + return fmt.Errorf("%s | %w", "RemoteIndices", err) + } + + case "restriction": + if err := dec.Decode(&s.Restriction); err != nil { + return fmt.Errorf("%s | %w", "Restriction", err) + } + case "run_as": if err := dec.Decode(&s.RunAs); err != nil { return fmt.Errorf("%s | %w", "RunAs", err) @@ -140,8 +162,10 @@ func (s *RoleDescriptorRead) UnmarshalJSON(data []byte) error { // NewRoleDescriptorRead returns a RoleDescriptorRead. 
func NewRoleDescriptorRead() *RoleDescriptorRead { r := &RoleDescriptorRead{ - TransientMetadata: make(map[string]json.RawMessage, 0), + TransientMetadata: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/roledescriptorwrapper.go b/typedapi/types/roledescriptorwrapper.go index a497e87a05..28e2d3d062 100644 --- a/typedapi/types/roledescriptorwrapper.go +++ b/typedapi/types/roledescriptorwrapper.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // RoleDescriptorWrapper type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_service_accounts/types.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_service_accounts/types.ts#L22-L24 type RoleDescriptorWrapper struct { RoleDescriptor RoleDescriptorRead `json:"role_descriptor"` } @@ -33,3 +33,5 @@ func NewRoleDescriptorWrapper() *RoleDescriptorWrapper { return r } + +// false diff --git a/typedapi/types/rolemappingrule.go b/typedapi/types/rolemappingrule.go index 94e6b51abb..dcf968d1e2 100644 --- a/typedapi/types/rolemappingrule.go +++ b/typedapi/types/rolemappingrule.go @@ -16,23 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // RoleMappingRule type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/RoleMappingRule.ts#L23-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/RoleMappingRule.ts#L22-L33 type RoleMappingRule struct { - All []RoleMappingRule `json:"all,omitempty"` - Any []RoleMappingRule `json:"any,omitempty"` - Except *RoleMappingRule `json:"except,omitempty"` - Field *FieldRule `json:"field,omitempty"` + AdditionalRoleMappingRuleProperty map[string]json.RawMessage `json:"-"` + All []RoleMappingRule `json:"all,omitempty"` + Any []RoleMappingRule `json:"any,omitempty"` + Except *RoleMappingRule `json:"except,omitempty"` + Field *FieldRule `json:"field,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s RoleMappingRule) MarshalJSON() ([]byte, error) { + type opt RoleMappingRule + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalRoleMappingRuleProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalRoleMappingRuleProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewRoleMappingRule returns a RoleMappingRule. 
func NewRoleMappingRule() *RoleMappingRule { - r := &RoleMappingRule{} + r := &RoleMappingRule{ + AdditionalRoleMappingRuleProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type RoleMappingRuleVariant interface { + RoleMappingRuleCaster() *RoleMappingRule +} + +func (s *RoleMappingRule) RoleMappingRuleCaster() *RoleMappingRule { + return s +} diff --git a/typedapi/types/rolequerycontainer.go b/typedapi/types/rolequerycontainer.go index 744f26ae2a..56b459bc86 100644 --- a/typedapi/types/rolequerycontainer.go +++ b/typedapi/types/rolequerycontainer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,8 +30,9 @@ import ( // RoleQueryContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_role/types.ts#L37-L101 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_role/types.ts#L37-L101 type RoleQueryContainer struct { + AdditionalRoleQueryContainerProperty map[string]json.RawMessage `json:"-"` // Bool matches roles matching boolean combinations of other queries. Bool *BoolQuery `json:"bool,omitempty"` // Exists Returns roles that contain an indexed value for a field. 
@@ -159,20 +160,73 @@ func (s *RoleQueryContainer) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Wildcard", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalRoleQueryContainerProperty == nil { + s.AdditionalRoleQueryContainerProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalRoleQueryContainerProperty", err) + } + s.AdditionalRoleQueryContainerProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s RoleQueryContainer) MarshalJSON() ([]byte, error) { + type opt RoleQueryContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalRoleQueryContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalRoleQueryContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewRoleQueryContainer returns a RoleQueryContainer. 
func NewRoleQueryContainer() *RoleQueryContainer { r := &RoleQueryContainer{ - Match: make(map[string]MatchQuery, 0), - Prefix: make(map[string]PrefixQuery, 0), - Range: make(map[string]RangeQuery, 0), - Term: make(map[string]TermQuery, 0), - Wildcard: make(map[string]WildcardQuery, 0), + AdditionalRoleQueryContainerProperty: make(map[string]json.RawMessage), + Match: make(map[string]MatchQuery), + Prefix: make(map[string]PrefixQuery), + Range: make(map[string]RangeQuery), + Term: make(map[string]TermQuery), + Wildcard: make(map[string]WildcardQuery), } return r } + +// true + +type RoleQueryContainerVariant interface { + RoleQueryContainerCaster() *RoleQueryContainer +} + +func (s *RoleQueryContainer) RoleQueryContainerCaster() *RoleQueryContainer { + return s +} diff --git a/typedapi/types/roletemplate.go b/typedapi/types/roletemplate.go index 8a75ed6ceb..04d2684727 100644 --- a/typedapi/types/roletemplate.go +++ b/typedapi/types/roletemplate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // RoleTemplate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/RoleTemplate.ts#L28-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/RoleTemplate.ts#L28-L31 type RoleTemplate struct { Format *templateformat.TemplateFormat `json:"format,omitempty"` Template Script `json:"template"` @@ -38,3 +38,13 @@ func NewRoleTemplate() *RoleTemplate { return r } + +// true + +type RoleTemplateVariant interface { + RoleTemplateCaster() *RoleTemplate +} + +func (s *RoleTemplate) RoleTemplateCaster() *RoleTemplate { + return s +} diff --git a/typedapi/types/roletemplateinlinequery.go b/typedapi/types/roletemplateinlinequery.go index 67b0a9f426..df3e4f5597 100644 --- a/typedapi/types/roletemplateinlinequery.go +++ b/typedapi/types/roletemplateinlinequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // Query // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/Privileges.ts#L289-L290 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L383-L384 type RoleTemplateInlineQuery any + +type RoleTemplateInlineQueryVariant interface { + RoleTemplateInlineQueryCaster() *RoleTemplateInlineQuery +} diff --git a/typedapi/types/roletemplatequery.go b/typedapi/types/roletemplatequery.go index d44ce52c1d..6a691c05e2 100644 --- a/typedapi/types/roletemplatequery.go +++ b/typedapi/types/roletemplatequery.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // RoleTemplateQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/Privileges.ts#L257-L267 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L351-L361 type RoleTemplateQuery struct { // Template When you create a role, you can specify a query that defines the document // level security permissions. 
You can optionally @@ -41,3 +41,13 @@ func NewRoleTemplateQuery() *RoleTemplateQuery { return r } + +// true + +type RoleTemplateQueryVariant interface { + RoleTemplateQueryCaster() *RoleTemplateQuery +} + +func (s *RoleTemplateQuery) RoleTemplateQueryCaster() *RoleTemplateQuery { + return s +} diff --git a/typedapi/types/roletemplatescript.go b/typedapi/types/roletemplatescript.go index 57dcd77eec..0e45beb6ff 100644 --- a/typedapi/types/roletemplatescript.go +++ b/typedapi/types/roletemplatescript.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // RoleTemplateScript type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/Privileges.ts#L269-L287 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L363-L381 type RoleTemplateScript struct { // Id The `id` for a stored script. 
Id *string `json:"id,omitempty"` @@ -109,7 +109,7 @@ func (s *RoleTemplateScript) UnmarshalJSON(data []byte) error { switch t { - case "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": o := NewQuery() localDec := json.NewDecoder(bytes.NewReader(message)) if err := localDec.Decode(&o); err != nil { @@ -135,9 +135,19 @@ func (s *RoleTemplateScript) UnmarshalJSON(data 
[]byte) error { // NewRoleTemplateScript returns a RoleTemplateScript. func NewRoleTemplateScript() *RoleTemplateScript { r := &RoleTemplateScript{ - Options: make(map[string]string, 0), - Params: make(map[string]json.RawMessage, 0), + Options: make(map[string]string), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type RoleTemplateScriptVariant interface { + RoleTemplateScriptCaster() *RoleTemplateScript +} + +func (s *RoleTemplateScript) RoleTemplateScriptCaster() *RoleTemplateScript { + return s +} diff --git a/typedapi/types/rolloveraction.go b/typedapi/types/rolloveraction.go index 5a57abfcfe..973fdb8d3a 100644 --- a/typedapi/types/rolloveraction.go +++ b/typedapi/types/rolloveraction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RolloverAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Phase.ts#L102-L113 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Phase.ts#L99-L110 type RolloverAction struct { MaxAge Duration `json:"max_age,omitempty"` MaxDocs *int64 `json:"max_docs,omitempty"` @@ -161,3 +161,13 @@ func NewRolloverAction() *RolloverAction { return r } + +// true + +type RolloverActionVariant interface { + RolloverActionCaster() *RolloverAction +} + +func (s *RolloverAction) RolloverActionCaster() *RolloverAction { + return s +} diff --git a/typedapi/types/rolloverconditions.go b/typedapi/types/rolloverconditions.go index 49a824cc99..aed0072bd9 100644 --- a/typedapi/types/rolloverconditions.go +++ b/typedapi/types/rolloverconditions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RolloverConditions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/rollover/types.ts#L24-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/rollover/types.ts#L24-L40 type RolloverConditions struct { MaxAge Duration `json:"max_age,omitempty"` MaxAgeMillis *int64 `json:"max_age_millis,omitempty"` @@ -231,3 +231,13 @@ func NewRolloverConditions() *RolloverConditions { return r } + +// true + +type RolloverConditionsVariant interface { + RolloverConditionsCaster() *RolloverConditions +} + +func (s *RolloverConditions) RolloverConditionsCaster() *RolloverConditions { + return s +} diff --git a/typedapi/types/rollupcapabilities.go b/typedapi/types/rollupcapabilities.go index 080c8415bb..319716142a 100644 --- a/typedapi/types/rollupcapabilities.go +++ b/typedapi/types/rollupcapabilities.go @@ -16,14 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // RollupCapabilities type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_rollup_caps/types.ts#L25-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_rollup_caps/types.ts#L24-L29 type RollupCapabilities struct { + // RollupJobs There can be multiple, independent jobs configured for a single index or + // index pattern. Each of these jobs may have different configurations, so the + // API returns a list of all the various configurations available. 
RollupJobs []RollupCapabilitySummary `json:"rollup_jobs"` } @@ -33,3 +36,5 @@ func NewRollupCapabilities() *RollupCapabilities { return r } + +// false diff --git a/typedapi/types/rollupcapabilitysummary.go b/typedapi/types/rollupcapabilitysummary.go index a013c571d2..a91018e96e 100644 --- a/typedapi/types/rollupcapabilitysummary.go +++ b/typedapi/types/rollupcapabilitysummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RollupCapabilitySummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_rollup_caps/types.ts#L29-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_rollup_caps/types.ts#L31-L36 type RollupCapabilitySummary struct { Fields map[string][]RollupFieldSummary `json:"fields"` IndexPattern string `json:"index_pattern"` @@ -106,8 +106,10 @@ func (s *RollupCapabilitySummary) UnmarshalJSON(data []byte) error { // NewRollupCapabilitySummary returns a RollupCapabilitySummary. func NewRollupCapabilitySummary() *RollupCapabilitySummary { r := &RollupCapabilitySummary{ - Fields: make(map[string][]RollupFieldSummary, 0), + Fields: make(map[string][]RollupFieldSummary), } return r } + +// false diff --git a/typedapi/types/rollupfieldsummary.go b/typedapi/types/rollupfieldsummary.go index b93b5a59ee..b18767f704 100644 --- a/typedapi/types/rollupfieldsummary.go +++ b/typedapi/types/rollupfieldsummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RollupFieldSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_rollup_caps/types.ts#L36-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_rollup_caps/types.ts#L38-L42 type RollupFieldSummary struct { Agg string `json:"agg"` CalendarInterval Duration `json:"calendar_interval,omitempty"` @@ -86,3 +86,5 @@ func NewRollupFieldSummary() *RollupFieldSummary { return r } + +// false diff --git a/typedapi/types/rollupjob.go b/typedapi/types/rollupjob.go index 87d4ec89df..728b25aa6d 100644 --- a/typedapi/types/rollupjob.go +++ b/typedapi/types/rollupjob.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // RollupJob type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_jobs/types.ts#L28-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_jobs/types.ts#L28-L43 type RollupJob struct { + // Config The rollup job configuration. Config RollupJobConfiguration `json:"config"` - Stats RollupJobStats `json:"stats"` - Status RollupJobStatus `json:"status"` + // Stats Transient statistics about the rollup job, such as how many documents have + // been processed and how many rollup summary docs have been indexed. 
+ // These stats are not persisted. + // If a node is restarted, these stats are reset. + Stats RollupJobStats `json:"stats"` + // Status The current status of the indexer for the rollup job. + Status RollupJobStatus `json:"status"` } // NewRollupJob returns a RollupJob. @@ -35,3 +41,5 @@ func NewRollupJob() *RollupJob { return r } + +// false diff --git a/typedapi/types/rollupjobconfiguration.go b/typedapi/types/rollupjobconfiguration.go index 3594706410..11964b8daf 100644 --- a/typedapi/types/rollupjobconfiguration.go +++ b/typedapi/types/rollupjobconfiguration.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RollupJobConfiguration type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_jobs/types.ts#L34-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_jobs/types.ts#L45-L54 type RollupJobConfiguration struct { Cron string `json:"cron"` Groups Groupings `json:"groups"` @@ -133,3 +133,5 @@ func NewRollupJobConfiguration() *RollupJobConfiguration { return r } + +// false diff --git a/typedapi/types/rollupjobstats.go b/typedapi/types/rollupjobstats.go index 6edfe417c6..808ff51b10 100644 --- a/typedapi/types/rollupjobstats.go +++ b/typedapi/types/rollupjobstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RollupJobStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_jobs/types.ts#L45-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_jobs/types.ts#L56-L69 type RollupJobStats struct { DocumentsProcessed int64 `json:"documents_processed"` IndexFailures int64 `json:"index_failures"` @@ -223,3 +223,5 @@ func NewRollupJobStats() *RollupJobStats { return r } + +// false diff --git a/typedapi/types/rollupjobstatus.go b/typedapi/types/rollupjobstatus.go index 0b03c8fef9..494b0e19c0 100644 --- a/typedapi/types/rollupjobstatus.go +++ b/typedapi/types/rollupjobstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // RollupJobStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_jobs/types.ts#L60-L64 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_jobs/types.ts#L71-L75 type RollupJobStatus struct { CurrentPosition map[string]json.RawMessage `json:"current_position,omitempty"` JobState indexingjobstate.IndexingJobState `json:"job_state"` @@ -90,8 +90,10 @@ func (s *RollupJobStatus) UnmarshalJSON(data []byte) error { // NewRollupJobStatus returns a RollupJobStatus. 
func NewRollupJobStatus() *RollupJobStatus { r := &RollupJobStatus{ - CurrentPosition: make(map[string]json.RawMessage, 0), + CurrentPosition: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/rollupjobsummary.go b/typedapi/types/rollupjobsummary.go index e83be14220..7117a3609b 100644 --- a/typedapi/types/rollupjobsummary.go +++ b/typedapi/types/rollupjobsummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RollupJobSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_rollup_index_caps/types.ts#L28-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_rollup_index_caps/types.ts#L28-L33 type RollupJobSummary struct { Fields map[string][]RollupJobSummaryField `json:"fields"` IndexPattern string `json:"index_pattern"` @@ -92,8 +92,10 @@ func (s *RollupJobSummary) UnmarshalJSON(data []byte) error { // NewRollupJobSummary returns a RollupJobSummary. func NewRollupJobSummary() *RollupJobSummary { r := &RollupJobSummary{ - Fields: make(map[string][]RollupJobSummaryField, 0), + Fields: make(map[string][]RollupJobSummaryField), } return r } + +// false diff --git a/typedapi/types/rollupjobsummaryfield.go b/typedapi/types/rollupjobsummaryfield.go index e5cb0f551a..31fa9ac3d0 100644 --- a/typedapi/types/rollupjobsummaryfield.go +++ b/typedapi/types/rollupjobsummaryfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RollupJobSummaryField type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/get_rollup_index_caps/types.ts#L35-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/get_rollup_index_caps/types.ts#L35-L39 type RollupJobSummaryField struct { Agg string `json:"agg"` CalendarInterval Duration `json:"calendar_interval,omitempty"` @@ -86,3 +86,5 @@ func NewRollupJobSummaryField() *RollupJobSummaryField { return r } + +// false diff --git a/typedapi/types/romaniananalyzer.go b/typedapi/types/romaniananalyzer.go new file mode 100644 index 0000000000..e23f9188a6 --- /dev/null +++ b/typedapi/types/romaniananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RomanianAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L248-L253 +type RomanianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *RomanianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s RomanianAnalyzer) MarshalJSON() 
([]byte, error) { + type innerRomanianAnalyzer RomanianAnalyzer + tmp := innerRomanianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "romanian" + + return json.Marshal(tmp) +} + +// NewRomanianAnalyzer returns a RomanianAnalyzer. +func NewRomanianAnalyzer() *RomanianAnalyzer { + r := &RomanianAnalyzer{} + + return r +} + +// true + +type RomanianAnalyzerVariant interface { + RomanianAnalyzerCaster() *RomanianAnalyzer +} + +func (s *RomanianAnalyzer) RomanianAnalyzerCaster() *RomanianAnalyzer { + return s +} diff --git a/typedapi/types/routingfield.go b/typedapi/types/routingfield.go index 617c758782..881c4d9450 100644 --- a/typedapi/types/routingfield.go +++ b/typedapi/types/routingfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RoutingField type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/meta-fields.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/meta-fields.ts#L50-L52 type RoutingField struct { Required bool `json:"required"` } @@ -76,3 +76,13 @@ func NewRoutingField() *RoutingField { return r } + +// true + +type RoutingFieldVariant interface { + RoutingFieldCaster() *RoutingField +} + +func (s *RoutingField) RoutingFieldCaster() *RoutingField { + return s +} diff --git a/typedapi/types/rrfrank.go b/typedapi/types/rrfrank.go index 93a13af9e1..197cb13bd4 100644 --- a/typedapi/types/rrfrank.go +++ b/typedapi/types/rrfrank.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RrfRank type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Rank.ts#L32-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Rank.ts#L32-L37 type RrfRank struct { // RankConstant How much influence documents in individual result sets per query have over // the final ranked result set @@ -96,3 +96,13 @@ func NewRrfRank() *RrfRank { return r } + +// true + +type RrfRankVariant interface { + RrfRankCaster() *RrfRank +} + +func (s *RrfRank) RrfRankCaster() *RrfRank { + return s +} diff --git a/typedapi/types/rrfretriever.go b/typedapi/types/rrfretriever.go index 354cf5c92d..721548f9b5 100644 --- a/typedapi/types/rrfretriever.go +++ b/typedapi/types/rrfretriever.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,10 +31,13 @@ import ( // RRFRetriever type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Retriever.ts#L73-L80 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Retriever.ts#L84-L91 type RRFRetriever struct { // Filter Query to filter the documents that can match. 
Filter []Query `json:"filter,omitempty"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are not + // included in the top documents. + MinScore *float32 `json:"min_score,omitempty"` // RankConstant This value determines how much influence documents in individual result sets // per query have over the final ranked result set. RankConstant *int `json:"rank_constant,omitempty"` @@ -76,6 +79,22 @@ func (s *RRFRetriever) UnmarshalJSON(data []byte) error { } } + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + case "rank_constant": var tmp any @@ -124,3 +143,13 @@ func NewRRFRetriever() *RRFRetriever { return r } + +// true + +type RRFRetrieverVariant interface { + RRFRetrieverCaster() *RRFRetriever +} + +func (s *RRFRetriever) RRFRetrieverCaster() *RRFRetriever { + return s +} diff --git a/typedapi/types/rulecondition.go b/typedapi/types/rulecondition.go index 6e01bd2cc1..812c56404c 100644 --- a/typedapi/types/rulecondition.go +++ b/typedapi/types/rulecondition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // RuleCondition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Rule.ts#L52-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Rule.ts#L52-L65 type RuleCondition struct { // AppliesTo Specifies the result property to which the condition applies. If your // detector uses `lat_long`, `metric`, `rare`, or `freq_rare` functions, you can @@ -99,3 +99,13 @@ func NewRuleCondition() *RuleCondition { return r } + +// true + +type RuleConditionVariant interface { + RuleConditionCaster() *RuleCondition +} + +func (s *RuleCondition) RuleConditionCaster() *RuleCondition { + return s +} diff --git a/typedapi/types/rulequery.go b/typedapi/types/rulequery.go index 6814b365d5..4907e2272f 100644 --- a/typedapi/types/rulequery.go +++ b/typedapi/types/rulequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RuleQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L381-L385 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L398-L405 type RuleQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -40,7 +40,7 @@ type RuleQuery struct { // A value greater than 1.0 increases the relevance score. 
Boost *float32 `json:"boost,omitempty"` MatchCriteria json.RawMessage `json:"match_criteria,omitempty"` - Organic *Query `json:"organic,omitempty"` + Organic Query `json:"organic"` QueryName_ *string `json:"_name,omitempty"` RulesetIds []string `json:"ruleset_ids"` } @@ -114,3 +114,13 @@ func NewRuleQuery() *RuleQuery { return r } + +// true + +type RuleQueryVariant interface { + RuleQueryCaster() *RuleQuery +} + +func (s *RuleQuery) RuleQueryCaster() *RuleQuery { + return s +} diff --git a/typedapi/types/ruleretriever.go b/typedapi/types/ruleretriever.go new file mode 100644 index 0000000000..c597523e78 --- /dev/null +++ b/typedapi/types/ruleretriever.go @@ -0,0 +1,150 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RuleRetriever type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Retriever.ts#L106-L115 +type RuleRetriever struct { + // Filter Query to filter the documents that can match. + Filter []Query `json:"filter,omitempty"` + // MatchCriteria The match criteria that will determine if a rule in the provided rulesets + // should be applied. + MatchCriteria json.RawMessage `json:"match_criteria,omitempty"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are not + // included in the top documents. + MinScore *float32 `json:"min_score,omitempty"` + // RankWindowSize This value determines the size of the individual result set. + RankWindowSize *int `json:"rank_window_size,omitempty"` + // Retriever The retriever whose results rules should be applied to. + Retriever RetrieverContainer `json:"retriever"` + // RulesetIds The ruleset IDs containing the rules this retriever is evaluating against. + RulesetIds []string `json:"ruleset_ids"` +} + +func (s *RuleRetriever) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "match_criteria": + if err := dec.Decode(&s.MatchCriteria); err != nil { + return fmt.Errorf("%s | %w", "MatchCriteria", err) + } + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 
32) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + + case "rank_window_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RankWindowSize", err) + } + s.RankWindowSize = &value + case float64: + f := int(v) + s.RankWindowSize = &f + } + + case "retriever": + if err := dec.Decode(&s.Retriever); err != nil { + return fmt.Errorf("%s | %w", "Retriever", err) + } + + case "ruleset_ids": + if err := dec.Decode(&s.RulesetIds); err != nil { + return fmt.Errorf("%s | %w", "RulesetIds", err) + } + + } + } + return nil +} + +// NewRuleRetriever returns a RuleRetriever. +func NewRuleRetriever() *RuleRetriever { + r := &RuleRetriever{} + + return r +} + +// true + +type RuleRetrieverVariant interface { + RuleRetrieverCaster() *RuleRetriever +} + +func (s *RuleRetriever) RuleRetrieverCaster() *RuleRetriever { + return s +} diff --git a/typedapi/types/runningstatesearchinterval.go b/typedapi/types/runningstatesearchinterval.go index 6354e288f6..060265f834 100644 --- a/typedapi/types/runningstatesearchinterval.go +++ b/typedapi/types/runningstatesearchinterval.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // RunningStateSearchInterval type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Datafeed.ts#L213-L230 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Datafeed.ts#L226-L243 type RunningStateSearchInterval struct { // End The end time. End Duration `json:"end,omitempty"` @@ -88,3 +88,5 @@ func NewRunningStateSearchInterval() *RunningStateSearchInterval { return r } + +// false diff --git a/typedapi/types/runtimefield.go b/typedapi/types/runtimefield.go index 37e9edffe6..81a995ca8a 100644 --- a/typedapi/types/runtimefield.go +++ b/typedapi/types/runtimefield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,10 +33,12 @@ import ( // RuntimeField type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/RuntimeFields.ts#L26-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/RuntimeFields.ts#L26-L50 type RuntimeField struct { // FetchFields For type `lookup` FetchFields []RuntimeFieldFetchFields `json:"fetch_fields,omitempty"` + // Fields For type `composite` + Fields map[string]CompositeSubField `json:"fields,omitempty"` // Format A custom format for `date` type runtime fields. 
Format *string `json:"format,omitempty"` // InputField For type `lookup` @@ -72,6 +74,14 @@ func (s *RuntimeField) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "FetchFields", err) } + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]CompositeSubField, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + case "format": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -116,7 +126,19 @@ func (s *RuntimeField) UnmarshalJSON(data []byte) error { // NewRuntimeField returns a RuntimeField. func NewRuntimeField() *RuntimeField { - r := &RuntimeField{} + r := &RuntimeField{ + Fields: make(map[string]CompositeSubField), + } return r } + +// true + +type RuntimeFieldVariant interface { + RuntimeFieldCaster() *RuntimeField +} + +func (s *RuntimeField) RuntimeFieldCaster() *RuntimeField { + return s +} diff --git a/typedapi/types/runtimefieldfetchfields.go b/typedapi/types/runtimefieldfetchfields.go index b2526515d6..b56ade680d 100644 --- a/typedapi/types/runtimefieldfetchfields.go +++ b/typedapi/types/runtimefieldfetchfields.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RuntimeFieldFetchFields type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/RuntimeFields.ts#L50-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/RuntimeFields.ts#L56-L60 type RuntimeFieldFetchFields struct { Field string `json:"field"` Format *string `json:"format,omitempty"` @@ -92,3 +92,13 @@ func NewRuntimeFieldFetchFields() *RuntimeFieldFetchFields { return r } + +// true + +type RuntimeFieldFetchFieldsVariant interface { + RuntimeFieldFetchFieldsCaster() *RuntimeFieldFetchFields +} + +func (s *RuntimeFieldFetchFields) RuntimeFieldFetchFieldsCaster() *RuntimeFieldFetchFields { + return s +} diff --git a/typedapi/types/runtimefields.go b/typedapi/types/runtimefields.go index f0d33e2562..a984a8b0e9 100644 --- a/typedapi/types/runtimefields.go +++ b/typedapi/types/runtimefields.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // RuntimeFields type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/RuntimeFields.ts#L24-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/RuntimeFields.ts#L24-L24 type RuntimeFields map[string]RuntimeField + +type RuntimeFieldsVariant interface { + RuntimeFieldsCaster() *RuntimeFields +} diff --git a/typedapi/types/runtimefieldstype.go b/typedapi/types/runtimefieldstype.go index b797b490c8..800e205657 100644 --- a/typedapi/types/runtimefieldstype.go +++ b/typedapi/types/runtimefieldstype.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // RuntimeFieldsType type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L279-L294 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L289-L304 type RuntimeFieldsType struct { CharsMax int64 `json:"chars_max"` CharsTotal int64 `json:"chars_total"` @@ -265,3 +265,5 @@ func NewRuntimeFieldsType() *RuntimeFieldsType { return r } + +// false diff --git a/typedapi/types/russiananalyzer.go b/typedapi/types/russiananalyzer.go new file mode 100644 index 0000000000..70a6c2d5a4 --- /dev/null +++ b/typedapi/types/russiananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RussianAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L255-L260 +type RussianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *RussianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s RussianAnalyzer) MarshalJSON() ([]byte, error) { + type innerRussianAnalyzer RussianAnalyzer + tmp := innerRussianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "russian" + + return json.Marshal(tmp) +} + +// NewRussianAnalyzer returns a RussianAnalyzer. +func NewRussianAnalyzer() *RussianAnalyzer { + r := &RussianAnalyzer{} + + return r +} + +// true + +type RussianAnalyzerVariant interface { + RussianAnalyzerCaster() *RussianAnalyzer +} + +func (s *RussianAnalyzer) RussianAnalyzerCaster() *RussianAnalyzer { + return s +} diff --git a/typedapi/types/s3repository.go b/typedapi/types/s3repository.go index 4b4915ab76..3aa610c06d 100644 --- a/typedapi/types/s3repository.go +++ b/typedapi/types/s3repository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // S3Repository type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotRepository.ts#L50-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotRepository.ts#L50-L53 type S3Repository struct { Settings S3RepositorySettings `json:"settings"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewS3Repository() *S3Repository { return r } + +// true + +type S3RepositoryVariant interface { + S3RepositoryCaster() *S3Repository +} + +func (s *S3Repository) S3RepositoryCaster() *S3Repository { + return s +} diff --git a/typedapi/types/s3repositorysettings.go b/typedapi/types/s3repositorysettings.go index 7afd61183c..16b0add46f 100644 --- a/typedapi/types/s3repositorysettings.go +++ b/typedapi/types/s3repositorysettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // S3RepositorySettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotRepository.ts#L93-L102 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotRepository.ts#L93-L102 type S3RepositorySettings struct { BasePath *string `json:"base_path,omitempty"` Bucket string `json:"bucket"` @@ -195,3 +195,13 @@ func NewS3RepositorySettings() *S3RepositorySettings { return r } + +// true + +type S3RepositorySettingsVariant interface { + S3RepositorySettingsCaster() *S3RepositorySettings +} + +func (s *S3RepositorySettings) S3RepositorySettingsCaster() *S3RepositorySettings { + return s +} diff --git a/typedapi/types/samplediversity.go b/typedapi/types/samplediversity.go index 8e0c713660..6f22d97a43 100644 --- a/typedapi/types/samplediversity.go +++ b/typedapi/types/samplediversity.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SampleDiversity type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/graph/_types/ExploreControls.ts#L51-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/graph/_types/ExploreControls.ts#L51-L54 type SampleDiversity struct { Field string `json:"field"` MaxDocsPerValue int `json:"max_docs_per_value"` @@ -84,3 +84,13 @@ func NewSampleDiversity() *SampleDiversity { return r } + +// true + +type SampleDiversityVariant interface { + SampleDiversityCaster() *SampleDiversity +} + +func (s *SampleDiversity) SampleDiversityCaster() *SampleDiversity { + return s +} diff --git a/typedapi/types/sampleraggregate.go b/typedapi/types/sampleraggregate.go index 4a534c73df..72f07ce1f9 100644 --- a/typedapi/types/sampleraggregate.go +++ b/typedapi/types/sampleraggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // SamplerAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L502-L503 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L558-L559 type SamplerAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -491,6 +491,13 @@ func (s *SamplerAggregate) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -624,8 +631,10 @@ func (s SamplerAggregate) MarshalJSON() ([]byte, error) { // NewSamplerAggregate returns a SamplerAggregate. func NewSamplerAggregate() *SamplerAggregate { r := &SamplerAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/sampleraggregation.go b/typedapi/types/sampleraggregation.go index 5f5a57e95b..9c30d1da82 100644 --- a/typedapi/types/sampleraggregation.go +++ b/typedapi/types/sampleraggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SamplerAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L729-L735 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L771-L780 type SamplerAggregation struct { // ShardSize Limits how many top-scoring documents are collected in the sample processed // on each shard. @@ -80,3 +80,13 @@ func NewSamplerAggregation() *SamplerAggregation { return r } + +// true + +type SamplerAggregationVariant interface { + SamplerAggregationCaster() *SamplerAggregation +} + +func (s *SamplerAggregation) SamplerAggregationCaster() *SamplerAggregation { + return s +} diff --git a/typedapi/types/scalarvalue.go b/typedapi/types/scalarvalue.go index 7942d07c57..a174ddcbd2 100644 --- a/typedapi/types/scalarvalue.go +++ b/typedapi/types/scalarvalue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,5 +28,9 @@ package types // bool // nil // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L39-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L39-L43 type ScalarValue any + +type ScalarValueVariant interface { + ScalarValueCaster() *ScalarValue +} diff --git a/typedapi/types/scaledfloatnumberproperty.go b/typedapi/types/scaledfloatnumberproperty.go index bff70618dc..541346b4bc 100644 --- a/typedapi/types/scaledfloatnumberproperty.go +++ b/typedapi/types/scaledfloatnumberproperty.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // ScaledFloatNumberProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L182-L186 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L186-L190 type ScaledFloatNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,14 +48,14 @@ type ScaledFloatNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - NullValue *Float64 `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - ScalingFactor *Float64 `json:"scaling_factor,omitempty"` - Script *Script `json:"script,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *Float64 `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + ScalingFactor *Float64 `json:"scaling_factor,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -163,301 +164,313 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo 
case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -555,301 +568,313 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -876,18 +901,6 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Script", err) } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -902,6 +915,11 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -950,8 +968,8 @@ func (s ScaledFloatNumberProperty) MarshalJSON() ([]byte, error) { Properties: s.Properties, ScalingFactor: s.ScalingFactor, Script: s.Script, - Similarity: s.Similarity, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -965,10 +983,20 @@ func (s ScaledFloatNumberProperty) MarshalJSON() ([]byte, error) { // NewScaledFloatNumberProperty returns a ScaledFloatNumberProperty. 
func NewScaledFloatNumberProperty() *ScaledFloatNumberProperty { r := &ScaledFloatNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type ScaledFloatNumberPropertyVariant interface { + ScaledFloatNumberPropertyCaster() *ScaledFloatNumberProperty +} + +func (s *ScaledFloatNumberProperty) ScaledFloatNumberPropertyCaster() *ScaledFloatNumberProperty { + return s +} diff --git a/typedapi/types/schedulecontainer.go b/typedapi/types/schedulecontainer.go index 1c98d87ab1..3efa1df0f6 100644 --- a/typedapi/types/schedulecontainer.go +++ b/typedapi/types/schedulecontainer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,19 +26,22 @@ import ( "errors" "fmt" "io" + "strconv" ) // ScheduleContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Schedule.ts#L80-L91 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Schedule.ts#L80-L92 type ScheduleContainer struct { - Cron *string `json:"cron,omitempty"` - Daily *DailySchedule `json:"daily,omitempty"` - Hourly *HourlySchedule `json:"hourly,omitempty"` - Interval Duration `json:"interval,omitempty"` - Monthly []TimeOfMonth `json:"monthly,omitempty"` - Weekly []TimeOfWeek `json:"weekly,omitempty"` - Yearly []TimeOfYear `json:"yearly,omitempty"` + AdditionalScheduleContainerProperty map[string]json.RawMessage `json:"-"` + Cron *string `json:"cron,omitempty"` + Daily *DailySchedule `json:"daily,omitempty"` + Hourly *HourlySchedule `json:"hourly,omitempty"` + Interval Duration `json:"interval,omitempty"` + Monthly []TimeOfMonth `json:"monthly,omitempty"` + Timezone *string `json:"timezone,omitempty"` + Weekly []TimeOfWeek `json:"weekly,omitempty"` + Yearly []TimeOfYear `json:"yearly,omitempty"` } func (s *ScheduleContainer) UnmarshalJSON(data []byte) error { @@ -92,6 +95,18 @@ func (s *ScheduleContainer) UnmarshalJSON(data []byte) error { } } + case "timezone": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Timezone", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Timezone = &o + case "weekly": rawMsg := json.RawMessage{} dec.Decode(&rawMsg) @@ -124,14 +139,68 @@ func (s *ScheduleContainer) UnmarshalJSON(data []byte) error { } } + default: + + if key, ok := t.(string); ok { + if s.AdditionalScheduleContainerProperty == nil { + s.AdditionalScheduleContainerProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", 
"AdditionalScheduleContainerProperty", err) + } + s.AdditionalScheduleContainerProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s ScheduleContainer) MarshalJSON() ([]byte, error) { + type opt ScheduleContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalScheduleContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalScheduleContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewScheduleContainer returns a ScheduleContainer. func NewScheduleContainer() *ScheduleContainer { - r := &ScheduleContainer{} + r := &ScheduleContainer{ + AdditionalScheduleContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type ScheduleContainerVariant interface { + ScheduleContainerCaster() *ScheduleContainer +} + +func (s *ScheduleContainer) ScheduleContainerCaster() *ScheduleContainer { + return s +} diff --git a/typedapi/types/scheduletimeofday.go b/typedapi/types/scheduletimeofday.go index bedd67ac4d..444dcf3132 100644 --- a/typedapi/types/scheduletimeofday.go +++ b/typedapi/types/scheduletimeofday.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // HourAndMinute // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Schedule.ts#L98-L103 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Schedule.ts#L99-L104 type ScheduleTimeOfDay any + +type ScheduleTimeOfDayVariant interface { + ScheduleTimeOfDayCaster() *ScheduleTimeOfDay +} diff --git a/typedapi/types/scheduletriggerevent.go b/typedapi/types/scheduletriggerevent.go index f9d03bd250..576a85d4e5 100644 --- a/typedapi/types/scheduletriggerevent.go +++ b/typedapi/types/scheduletriggerevent.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ScheduleTriggerEvent type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Schedule.ts#L93-L96 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Schedule.ts#L94-L97 type ScheduleTriggerEvent struct { ScheduledTime DateTime `json:"scheduled_time"` TriggeredTime DateTime `json:"triggered_time,omitempty"` @@ -72,3 +72,13 @@ func NewScheduleTriggerEvent() *ScheduleTriggerEvent { return r } + +// true + +type ScheduleTriggerEventVariant interface { + ScheduleTriggerEventCaster() *ScheduleTriggerEvent +} + +func (s *ScheduleTriggerEvent) ScheduleTriggerEventCaster() *ScheduleTriggerEvent { + return s +} diff --git a/typedapi/types/schedulingconfiguration.go b/typedapi/types/schedulingconfiguration.go index 27785359b0..87724c58c0 100644 --- a/typedapi/types/schedulingconfiguration.go +++ b/typedapi/types/schedulingconfiguration.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // SchedulingConfiguration type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L231-L235 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L246-L250 type SchedulingConfiguration struct { AccessControl *ConnectorScheduling `json:"access_control,omitempty"` Full *ConnectorScheduling `json:"full,omitempty"` @@ -35,3 +35,13 @@ func NewSchedulingConfiguration() *SchedulingConfiguration { return r } + +// true + +type SchedulingConfigurationVariant interface { + SchedulingConfigurationCaster() *SchedulingConfiguration +} + +func (s *SchedulingConfiguration) SchedulingConfigurationCaster() *SchedulingConfiguration { + return s +} diff --git a/typedapi/types/scoresort.go b/typedapi/types/scoresort.go index 9b04d31850..e592d6b904 100644 --- a/typedapi/types/scoresort.go +++ b/typedapi/types/scoresort.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // ScoreSort type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/sort.ts#L55-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/sort.ts#L54-L56 type ScoreSort struct { Order *sortorder.SortOrder `json:"order,omitempty"` } @@ -37,3 +37,13 @@ func NewScoreSort() *ScoreSort { return r } + +// true + +type ScoreSortVariant interface { + ScoreSortCaster() *ScoreSort +} + +func (s *ScoreSort) ScoreSortCaster() *ScoreSort { + return s +} diff --git a/typedapi/types/script.go b/typedapi/types/script.go index 0a2e5d3837..2228f5b7ca 100644 --- a/typedapi/types/script.go +++ b/typedapi/types/script.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // Script type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Scripting.ts#L73-L97 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Scripting.ts#L75-L99 type Script struct { // Id The `id` for a stored script. Id *string `json:"id,omitempty"` @@ -120,9 +120,19 @@ func (s *Script) UnmarshalJSON(data []byte) error { // NewScript returns a Script. 
func NewScript() *Script { r := &Script{ - Options: make(map[string]string, 0), - Params: make(map[string]json.RawMessage, 0), + Options: make(map[string]string), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type ScriptVariant interface { + ScriptCaster() *Script +} + +func (s *Script) ScriptCaster() *Script { + return s +} diff --git a/typedapi/types/scriptcache.go b/typedapi/types/scriptcache.go index f78540129a..5a424e50dd 100644 --- a/typedapi/types/scriptcache.go +++ b/typedapi/types/scriptcache.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ScriptCache type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L1031-L1045 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L1102-L1116 type ScriptCache struct { // CacheEvictions Total number of times the script cache has evicted old data. CacheEvictions *int64 `json:"cache_evictions,omitempty"` @@ -126,3 +126,5 @@ func NewScriptCache() *ScriptCache { return r } + +// false diff --git a/typedapi/types/scriptcondition.go b/typedapi/types/scriptcondition.go index 7ee89b9bbc..58a6866c47 100644 --- a/typedapi/types/scriptcondition.go +++ b/typedapi/types/scriptcondition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ScriptCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Conditions.ts#L79-L87 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Conditions.ts#L79-L87 type ScriptCondition struct { Id *string `json:"id,omitempty"` Lang *string `json:"lang,omitempty"` @@ -106,8 +106,18 @@ func (s *ScriptCondition) UnmarshalJSON(data []byte) error { // NewScriptCondition returns a ScriptCondition. func NewScriptCondition() *ScriptCondition { r := &ScriptCondition{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type ScriptConditionVariant interface { + ScriptConditionCaster() *ScriptCondition +} + +func (s *ScriptCondition) ScriptConditionCaster() *ScriptCondition { + return s +} diff --git a/typedapi/types/scriptedheuristic.go b/typedapi/types/scriptedheuristic.go index 5ec9492b28..6da4c07f7e 100644 --- a/typedapi/types/scriptedheuristic.go +++ b/typedapi/types/scriptedheuristic.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ScriptedHeuristic type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L768-L770 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L813-L815 type ScriptedHeuristic struct { Script Script `json:"script"` } @@ -33,3 +33,13 @@ func NewScriptedHeuristic() *ScriptedHeuristic { return r } + +// true + +type ScriptedHeuristicVariant interface { + ScriptedHeuristicCaster() *ScriptedHeuristic +} + +func (s *ScriptedHeuristic) ScriptedHeuristicCaster() *ScriptedHeuristic { + return s +} diff --git a/typedapi/types/scriptedmetricaggregate.go b/typedapi/types/scriptedmetricaggregate.go index 39e42b759d..404abc9284 100644 --- a/typedapi/types/scriptedmetricaggregate.go +++ b/typedapi/types/scriptedmetricaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ScriptedMetricAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L653-L656 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L739-L745 type ScriptedMetricAggregate struct { Meta Metadata `json:"meta,omitempty"` Value json.RawMessage `json:"value,omitempty"` @@ -72,3 +72,5 @@ func NewScriptedMetricAggregate() *ScriptedMetricAggregate { return r } + +// false diff --git a/typedapi/types/scriptedmetricaggregation.go b/typedapi/types/scriptedmetricaggregation.go index 421a9f6372..bc5b49d1f3 100644 --- a/typedapi/types/scriptedmetricaggregation.go +++ b/typedapi/types/scriptedmetricaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ScriptedMetricAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L254-L280 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L263-L289 type ScriptedMetricAggregation struct { // CombineScript Runs once on each shard after document collection is complete. // Allows the aggregation to consolidate the state returned from each shard. @@ -125,8 +125,18 @@ func (s *ScriptedMetricAggregation) UnmarshalJSON(data []byte) error { // NewScriptedMetricAggregation returns a ScriptedMetricAggregation. 
func NewScriptedMetricAggregation() *ScriptedMetricAggregation { r := &ScriptedMetricAggregation{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type ScriptedMetricAggregationVariant interface { + ScriptedMetricAggregationCaster() *ScriptedMetricAggregation +} + +func (s *ScriptedMetricAggregation) ScriptedMetricAggregationCaster() *ScriptedMetricAggregation { + return s +} diff --git a/typedapi/types/scriptfield.go b/typedapi/types/scriptfield.go index a7517c9cd3..2aec409596 100644 --- a/typedapi/types/scriptfield.go +++ b/typedapi/types/scriptfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ScriptField type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Scripting.ts#L99-L102 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Scripting.ts#L101-L104 type ScriptField struct { IgnoreFailure *bool `json:"ignore_failure,omitempty"` Script Script `json:"script"` @@ -82,3 +82,13 @@ func NewScriptField() *ScriptField { return r } + +// true + +type ScriptFieldVariant interface { + ScriptFieldCaster() *ScriptField +} + +func (s *ScriptField) ScriptFieldCaster() *ScriptField { + return s +} diff --git a/typedapi/types/scripting.go b/typedapi/types/scripting.go index ea3b0498c3..e7cc3d3cb6 100644 --- a/typedapi/types/scripting.go +++ b/typedapi/types/scripting.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Scripting type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L977-L995 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L1048-L1066 type Scripting struct { // CacheEvictions Total number of times the script cache has evicted old data. CacheEvictions *int64 `json:"cache_evictions,omitempty"` @@ -126,8 +126,10 @@ func (s *Scripting) UnmarshalJSON(data []byte) error { // NewScripting returns a Scripting. func NewScripting() *Scripting { r := &Scripting{ - CompilationsHistory: make(map[string]int64, 0), + CompilationsHistory: make(map[string]int64), } return r } + +// false diff --git a/typedapi/types/scriptprocessor.go b/typedapi/types/scriptprocessor.go index 98c7cd0c00..cfae68627d 100644 --- a/typedapi/types/scriptprocessor.go +++ b/typedapi/types/scriptprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ScriptProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L1005-L1025 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1404-L1424 type ScriptProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -172,8 +172,18 @@ func (s *ScriptProcessor) UnmarshalJSON(data []byte) error { // NewScriptProcessor returns a ScriptProcessor. func NewScriptProcessor() *ScriptProcessor { r := &ScriptProcessor{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type ScriptProcessorVariant interface { + ScriptProcessorCaster() *ScriptProcessor +} + +func (s *ScriptProcessor) ScriptProcessorCaster() *ScriptProcessor { + return s +} diff --git a/typedapi/types/scriptquery.go b/typedapi/types/scriptquery.go index 2570f8608c..08684e1d7f 100644 --- a/typedapi/types/scriptquery.go +++ b/typedapi/types/scriptquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ScriptQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L327-L333 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L337-L346 type ScriptQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -104,3 +104,13 @@ func NewScriptQuery() *ScriptQuery { return r } + +// true + +type ScriptQueryVariant interface { + ScriptQueryCaster() *ScriptQuery +} + +func (s *ScriptQuery) ScriptQueryCaster() *ScriptQuery { + return s +} diff --git a/typedapi/types/scriptscorefunction.go b/typedapi/types/scriptscorefunction.go index 756c2ca740..cba0d76182 100644 --- a/typedapi/types/scriptscorefunction.go +++ b/typedapi/types/scriptscorefunction.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ScriptScoreFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L124-L129 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L137-L142 type ScriptScoreFunction struct { // Script A script that computes a score. Script Script `json:"script"` @@ -34,3 +34,13 @@ func NewScriptScoreFunction() *ScriptScoreFunction { return r } + +// true + +type ScriptScoreFunctionVariant interface { + ScriptScoreFunctionCaster() *ScriptScoreFunction +} + +func (s *ScriptScoreFunction) ScriptScoreFunctionCaster() *ScriptScoreFunction { + return s +} diff --git a/typedapi/types/scriptscorequery.go b/typedapi/types/scriptscorequery.go index a21a5698b1..7eb2a5a712 100644 --- a/typedapi/types/scriptscorequery.go +++ b/typedapi/types/scriptscorequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ScriptScoreQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L335-L349 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L348-L365 type ScriptScoreQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -43,7 +43,7 @@ type ScriptScoreQuery struct { // from the search results. MinScore *float32 `json:"min_score,omitempty"` // Query Query used to return documents. - Query *Query `json:"query,omitempty"` + Query Query `json:"query"` QueryName_ *string `json:"_name,omitempty"` // Script Script used to compute the score of documents returned by the query. // Important: final relevance scores from the `script_score` query cannot be @@ -131,3 +131,13 @@ func NewScriptScoreQuery() *ScriptScoreQuery { return r } + +// true + +type ScriptScoreQueryVariant interface { + ScriptScoreQueryCaster() *ScriptScoreQuery +} + +func (s *ScriptScoreQuery) ScriptScoreQueryCaster() *ScriptScoreQuery { + return s +} diff --git a/typedapi/types/scriptsort.go b/typedapi/types/scriptsort.go index e99ea49916..613efa2624 100644 --- a/typedapi/types/scriptsort.go +++ b/typedapi/types/scriptsort.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,7 +28,7 @@ import ( // ScriptSort type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/sort.ts#L73-L79 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/sort.ts#L72-L78 type ScriptSort struct { Mode *sortmode.SortMode `json:"mode,omitempty"` Nested *NestedSortValue `json:"nested,omitempty"` @@ -43,3 +43,13 @@ func NewScriptSort() *ScriptSort { return r } + +// true + +type ScriptSortVariant interface { + ScriptSortCaster() *ScriptSort +} + +func (s *ScriptSort) ScriptSortCaster() *ScriptSort { + return s +} diff --git a/typedapi/types/scripttransform.go b/typedapi/types/scripttransform.go index 04eaab437e..6cf6049036 100644 --- a/typedapi/types/scripttransform.go +++ b/typedapi/types/scripttransform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ScriptTransform type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Transform.ts#L36-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Transform.ts#L36-L44 type ScriptTransform struct { Id *string `json:"id,omitempty"` Lang *string `json:"lang,omitempty"` @@ -106,8 +106,18 @@ func (s *ScriptTransform) UnmarshalJSON(data []byte) error { // NewScriptTransform returns a ScriptTransform. 
func NewScriptTransform() *ScriptTransform { r := &ScriptTransform{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type ScriptTransformVariant interface { + ScriptTransformCaster() *ScriptTransform +} + +func (s *ScriptTransform) ScriptTransformCaster() *ScriptTransform { + return s +} diff --git a/typedapi/types/scrollids.go b/typedapi/types/scrollids.go index 915fc3f100..10cbb1885d 100644 --- a/typedapi/types/scrollids.go +++ b/typedapi/types/scrollids.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ScrollIds type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L56-L56 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L56-L56 type ScrollIds []string + +type ScrollIdsVariant interface { + ScrollIdsCaster() *ScrollIds +} diff --git a/typedapi/types/searchablesnapshotaction.go b/typedapi/types/searchablesnapshotaction.go index 1a95d2834d..0a3a0296c1 100644 --- a/typedapi/types/searchablesnapshotaction.go +++ b/typedapi/types/searchablesnapshotaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SearchableSnapshotAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Phase.ts#L131-L134 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Phase.ts#L128-L131 type SearchableSnapshotAction struct { ForceMergeIndex *bool `json:"force_merge_index,omitempty"` SnapshotRepository string `json:"snapshot_repository"` @@ -89,3 +89,13 @@ func NewSearchableSnapshotAction() *SearchableSnapshotAction { return r } + +// true + +type SearchableSnapshotActionVariant interface { + SearchableSnapshotActionCaster() *SearchableSnapshotAction +} + +func (s *SearchableSnapshotAction) SearchableSnapshotActionCaster() *SearchableSnapshotAction { + return s +} diff --git a/typedapi/types/searchablesnapshots.go b/typedapi/types/searchablesnapshots.go index 171d4d25e5..1ea60bded2 100644 --- a/typedapi/types/searchablesnapshots.go +++ b/typedapi/types/searchablesnapshots.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SearchableSnapshots type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L428-L432 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L438-L442 type SearchableSnapshots struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -142,3 +142,5 @@ func NewSearchableSnapshots() *SearchableSnapshots { return r } + +// false diff --git a/typedapi/types/searchaccess.go b/typedapi/types/searchaccess.go new file mode 100644 index 0000000000..5d364382c4 --- /dev/null +++ b/typedapi/types/searchaccess.go @@ -0,0 +1,169 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SearchAccess type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L454-L474 +type SearchAccess struct { + // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that + // cover restricted indices. Implicitly, restricted indices have limited + // privileges that can cause pattern tests to fail. If restricted indices are + // explicitly included in the `names` list, Elasticsearch checks privileges + // against these indices regardless of the value set for + // `allow_restricted_indices`. + AllowRestrictedIndices *bool `json:"allow_restricted_indices,omitempty"` + // FieldSecurity The document fields that the owners of the role have read access to. + FieldSecurity *FieldSecurity `json:"field_security,omitempty"` + // Names A list of indices (or index name patterns) to which the permissions in this + // entry apply. + Names []string `json:"names"` + // Query A search query that defines the documents the owners of the role have access + // to. A document within the specified indices must match this query for it to + // be accessible by the owners of the role. 
+ Query IndicesPrivilegesQuery `json:"query,omitempty"` +} + +func (s *SearchAccess) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowRestrictedIndices", err) + } + s.AllowRestrictedIndices = &value + case bool: + s.AllowRestrictedIndices = &v + } + + case "field_security": + if err := dec.Decode(&s.FieldSecurity); err != nil { + return fmt.Errorf("%s | %w", "FieldSecurity", err) + } + + case "names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + } + + case "query": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + query_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Query", err) + } + + switch t { + + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", 
"nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + o := NewQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = o + break query_field + + case "template": + o := NewRoleTemplateQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = o + break query_field + + } + } + if s.Query == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + } + + } + } + return nil +} + +// NewSearchAccess returns a SearchAccess. +func NewSearchAccess() *SearchAccess { + r := &SearchAccess{} + + return r +} + +// true + +type SearchAccessVariant interface { + SearchAccessCaster() *SearchAccess +} + +func (s *SearchAccess) SearchAccessCaster() *SearchAccess { + return s +} diff --git a/typedapi/types/searchapplication.go b/typedapi/types/searchapplication.go index fa88fd5928..5c7f6cff23 100644 --- a/typedapi/types/searchapplication.go +++ b/typedapi/types/searchapplication.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,13 +30,13 @@ import ( // SearchApplication type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/_types/SearchApplication.ts#L24-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/_types/SearchApplication.ts#L24-L33 type SearchApplication struct { // AnalyticsCollectionName Analytics collection associated to the Search Application. AnalyticsCollectionName *string `json:"analytics_collection_name,omitempty"` // Indices Indices that are part of the Search Application. Indices []string `json:"indices"` - // Name Search Application name. + // Name Search Application name Name string `json:"name"` // Template Search template to use on search operations. Template *SearchApplicationTemplate `json:"template,omitempty"` @@ -95,3 +95,5 @@ func NewSearchApplication() *SearchApplication { return r } + +// false diff --git a/typedapi/types/searchapplicationlistitem.go b/typedapi/types/searchapplicationparameters.go similarity index 57% rename from typedapi/types/searchapplicationlistitem.go rename to typedapi/types/searchapplicationparameters.go index 7030ce5210..a70c227108 100644 --- a/typedapi/types/searchapplicationlistitem.go +++ b/typedapi/types/searchapplicationparameters.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,21 +28,19 @@ import ( "io" ) -// SearchApplicationListItem type. +// SearchApplicationParameters type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/list/SearchApplicationsListResponse.ts#L31-L48 -type SearchApplicationListItem struct { - // AnalyticsCollectionName Analytics collection associated to the Search Application +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/_types/SearchApplicationParameters.ts#L23-L36 +type SearchApplicationParameters struct { + // AnalyticsCollectionName Analytics collection associated to the Search Application. AnalyticsCollectionName *string `json:"analytics_collection_name,omitempty"` - // Indices Indices that are part of the Search Application + // Indices Indices that are part of the Search Application. Indices []string `json:"indices"` - // Name Search Application name - Name string `json:"name"` - // UpdatedAtMillis Last time the Search Application was updated - UpdatedAtMillis int64 `json:"updated_at_millis"` + // Template Search template to use on search operations. 
+ Template *SearchApplicationTemplate `json:"template,omitempty"` } -func (s *SearchApplicationListItem) UnmarshalJSON(data []byte) error { +func (s *SearchApplicationParameters) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -67,14 +65,9 @@ func (s *SearchApplicationListItem) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Indices", err) } - case "name": - if err := dec.Decode(&s.Name); err != nil { - return fmt.Errorf("%s | %w", "Name", err) - } - - case "updated_at_millis": - if err := dec.Decode(&s.UpdatedAtMillis); err != nil { - return fmt.Errorf("%s | %w", "UpdatedAtMillis", err) + case "template": + if err := dec.Decode(&s.Template); err != nil { + return fmt.Errorf("%s | %w", "Template", err) } } @@ -82,9 +75,19 @@ func (s *SearchApplicationListItem) UnmarshalJSON(data []byte) error { return nil } -// NewSearchApplicationListItem returns a SearchApplicationListItem. -func NewSearchApplicationListItem() *SearchApplicationListItem { - r := &SearchApplicationListItem{} +// NewSearchApplicationParameters returns a SearchApplicationParameters. +func NewSearchApplicationParameters() *SearchApplicationParameters { + r := &SearchApplicationParameters{} return r } + +// true + +type SearchApplicationParametersVariant interface { + SearchApplicationParametersCaster() *SearchApplicationParameters +} + +func (s *SearchApplicationParameters) SearchApplicationParametersCaster() *SearchApplicationParameters { + return s +} diff --git a/typedapi/types/searchapplicationtemplate.go b/typedapi/types/searchapplicationtemplate.go index 35dbc96ff8..542b1f731f 100644 --- a/typedapi/types/searchapplicationtemplate.go +++ b/typedapi/types/searchapplicationtemplate.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // SearchApplicationTemplate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/search_application/_types/SearchApplication.ts#L47-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/search_application/_types/SearchApplicationTemplate.ts#L22-L27 type SearchApplicationTemplate struct { // Script The associated mustache template. Script Script `json:"script"` @@ -34,3 +34,13 @@ func NewSearchApplicationTemplate() *SearchApplicationTemplate { return r } + +// true + +type SearchApplicationTemplateVariant interface { + SearchApplicationTemplateCaster() *SearchApplicationTemplate +} + +func (s *SearchApplicationTemplate) SearchApplicationTemplateCaster() *SearchApplicationTemplate { + return s +} diff --git a/typedapi/types/searchasyoutypeproperty.go b/typedapi/types/searchasyoutypeproperty.go index 379a6c9522..a5812d332f 100644 --- a/typedapi/types/searchasyoutypeproperty.go +++ b/typedapi/types/searchasyoutypeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" ) // SearchAsYouTypeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L212-L222 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L228-L239 type SearchAsYouTypeProperty struct { Analyzer *string `json:"analyzer,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -46,15 +47,16 @@ type SearchAsYouTypeProperty struct { IndexOptions *indexoptions.IndexOptions `json:"index_options,omitempty"` MaxShingleSize *int `json:"max_shingle_size,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Norms *bool `json:"norms,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - SearchAnalyzer *string `json:"search_analyzer,omitempty"` - SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Norms *bool `json:"norms,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SearchAnalyzer *string `json:"search_analyzer,omitempty"` + SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` + Similarity *string `json:"similarity,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` + Type string `json:"type,omitempty"` } func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { @@ -124,301 +126,313 @@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if 
err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -516,301 +530,313 
@@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", 
err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := 
NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -866,6 +892,11 @@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "term_vector": if err := dec.Decode(&s.TermVector); err != nil { return fmt.Errorf("%s | %w", "TermVector", err) @@ -900,6 +931,7 @@ func (s SearchAsYouTypeProperty) MarshalJSON() ([]byte, error) { SearchQuoteAnalyzer: s.SearchQuoteAnalyzer, Similarity: s.Similarity, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TermVector: s.TermVector, Type: s.Type, } @@ -912,10 +944,20 @@ func (s SearchAsYouTypeProperty) MarshalJSON() ([]byte, error) { // NewSearchAsYouTypeProperty returns a SearchAsYouTypeProperty. 
func NewSearchAsYouTypeProperty() *SearchAsYouTypeProperty { r := &SearchAsYouTypeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type SearchAsYouTypePropertyVariant interface { + SearchAsYouTypePropertyCaster() *SearchAsYouTypeProperty +} + +func (s *SearchAsYouTypeProperty) SearchAsYouTypePropertyCaster() *SearchAsYouTypeProperty { + return s +} diff --git a/typedapi/types/searchidle.go b/typedapi/types/searchidle.go index 30ae80f526..8b9837227c 100644 --- a/typedapi/types/searchidle.go +++ b/typedapi/types/searchidle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SearchIdle type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L245-L248 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L253-L256 type SearchIdle struct { After Duration `json:"after,omitempty"` } @@ -66,3 +66,13 @@ func NewSearchIdle() *SearchIdle { return r } + +// true + +type SearchIdleVariant interface { + SearchIdleCaster() *SearchIdle +} + +func (s *SearchIdle) SearchIdleCaster() *SearchIdle { + return s +} diff --git a/typedapi/types/searchinput.go b/typedapi/types/searchinput.go index 48296a5cc6..9c1bbd46ae 100644 --- a/typedapi/types/searchinput.go +++ b/typedapi/types/searchinput.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SearchInput type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L112-L116 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L112-L116 type SearchInput struct { Extract []string `json:"extract,omitempty"` Request SearchInputRequestDefinition `json:"request"` @@ -78,3 +78,13 @@ func NewSearchInput() *SearchInput { return r } + +// true + +type SearchInputVariant interface { + SearchInputCaster() *SearchInput +} + +func (s *SearchInput) SearchInputCaster() *SearchInput { + return s +} diff --git a/typedapi/types/searchinputrequestbody.go b/typedapi/types/searchinputrequestbody.go index ac6695a5ed..0f3b60ed62 100644 --- a/typedapi/types/searchinputrequestbody.go +++ b/typedapi/types/searchinputrequestbody.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // SearchInputRequestBody type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L147-L149 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L147-L149 type SearchInputRequestBody struct { Query Query `json:"query"` } @@ -33,3 +33,13 @@ func NewSearchInputRequestBody() *SearchInputRequestBody { return r } + +// true + +type SearchInputRequestBodyVariant interface { + SearchInputRequestBodyCaster() *SearchInputRequestBody +} + +func (s *SearchInputRequestBody) SearchInputRequestBodyCaster() *SearchInputRequestBody { + return s +} diff --git a/typedapi/types/searchinputrequestdefinition.go b/typedapi/types/searchinputrequestdefinition.go index 7044274c85..0abf471b71 100644 --- a/typedapi/types/searchinputrequestdefinition.go +++ b/typedapi/types/searchinputrequestdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // SearchInputRequestDefinition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L118-L125 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L118-L125 type SearchInputRequestDefinition struct { Body *SearchInputRequestBody `json:"body,omitempty"` Indices []string `json:"indices,omitempty"` @@ -108,3 +108,13 @@ func NewSearchInputRequestDefinition() *SearchInputRequestDefinition { return r } + +// true + +type SearchInputRequestDefinitionVariant interface { + SearchInputRequestDefinitionCaster() *SearchInputRequestDefinition +} + +func (s *SearchInputRequestDefinition) SearchInputRequestDefinitionCaster() *SearchInputRequestDefinition { + return s +} diff --git a/typedapi/types/searchprofile.go b/typedapi/types/searchprofile.go index e4335c3512..b49512d567 100644 --- a/typedapi/types/searchprofile.go +++ b/typedapi/types/searchprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SearchProfile type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/profile.ts#L126-L130 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L136-L140 type SearchProfile struct { Collector []Collector `json:"collector"` Query []QueryProfile `json:"query"` @@ -89,3 +89,5 @@ func NewSearchProfile() *SearchProfile { return r } + +// false diff --git a/typedapi/types/searchshardsnodeattributes.go b/typedapi/types/searchshardsnodeattributes.go new file mode 100644 index 0000000000..bf95b795e8 --- /dev/null +++ b/typedapi/types/searchshardsnodeattributes.go @@ -0,0 +1,159 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole" +) + +// SearchShardsNodeAttributes type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search_shards/SearchShardsResponse.ts#L42-L60 +type SearchShardsNodeAttributes struct { + // Attributes Lists node attributes. + Attributes map[string]string `json:"attributes"` + // EphemeralId The ephemeral ID of the node. + EphemeralId string `json:"ephemeral_id"` + ExternalId string `json:"external_id"` + MaxIndexVersion int `json:"max_index_version"` + MinIndexVersion int `json:"min_index_version"` + // Name The human-readable identifier of the node. + Name string `json:"name"` + Roles []noderole.NodeRole `json:"roles"` + // TransportAddress The host and port where transport HTTP connections are accepted. + TransportAddress string `json:"transport_address"` + Version string `json:"version"` +} + +func (s *SearchShardsNodeAttributes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "ephemeral_id": + if err := dec.Decode(&s.EphemeralId); err != nil { + return fmt.Errorf("%s | %w", "EphemeralId", err) + } + + case "external_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ExternalId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExternalId = o + + case "max_index_version": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxIndexVersion", err) + } + s.MaxIndexVersion = value + case float64: + f := int(v) + s.MaxIndexVersion = f + } + + case 
"min_index_version": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinIndexVersion", err) + } + s.MinIndexVersion = value + case float64: + f := int(v) + s.MinIndexVersion = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewSearchShardsNodeAttributes returns a SearchShardsNodeAttributes. +func NewSearchShardsNodeAttributes() *SearchShardsNodeAttributes { + r := &SearchShardsNodeAttributes{ + Attributes: make(map[string]string), + } + + return r +} + +// false diff --git a/typedapi/types/searchstats.go b/typedapi/types/searchstats.go index a176debedb..b209b10d39 100644 --- a/typedapi/types/searchstats.go +++ b/typedapi/types/searchstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SearchStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L252-L271 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L255-L274 type SearchStats struct { FetchCurrent int64 `json:"fetch_current"` FetchTime Duration `json:"fetch_time,omitempty"` @@ -259,8 +259,10 @@ func (s *SearchStats) UnmarshalJSON(data []byte) error { // NewSearchStats returns a SearchStats. func NewSearchStats() *SearchStats { r := &SearchStats{ - Groups: make(map[string]SearchStats, 0), + Groups: make(map[string]SearchStats), } return r } + +// false diff --git a/typedapi/types/searchtemplaterequestbody.go b/typedapi/types/searchtemplaterequestbody.go index 16072f9f0c..3714c2c404 100644 --- a/typedapi/types/searchtemplaterequestbody.go +++ b/typedapi/types/searchtemplaterequestbody.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SearchTemplateRequestBody type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L128-L145 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L128-L145 type SearchTemplateRequestBody struct { Explain *bool `json:"explain,omitempty"` // Id ID of the search template to use. If no source is specified, @@ -121,8 +121,18 @@ func (s *SearchTemplateRequestBody) UnmarshalJSON(data []byte) error { // NewSearchTemplateRequestBody returns a SearchTemplateRequestBody. 
func NewSearchTemplateRequestBody() *SearchTemplateRequestBody { r := &SearchTemplateRequestBody{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type SearchTemplateRequestBodyVariant interface { + SearchTemplateRequestBodyCaster() *SearchTemplateRequestBody +} + +func (s *SearchTemplateRequestBody) SearchTemplateRequestBodyCaster() *SearchTemplateRequestBody { + return s +} diff --git a/typedapi/types/searchtransform.go b/typedapi/types/searchtransform.go index 6a611cb376..c379efabac 100644 --- a/typedapi/types/searchtransform.go +++ b/typedapi/types/searchtransform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SearchTransform type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Transform.ts#L46-L49 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Transform.ts#L46-L49 type SearchTransform struct { Request SearchInputRequestDefinition `json:"request"` Timeout Duration `json:"timeout"` @@ -72,3 +72,13 @@ func NewSearchTransform() *SearchTransform { return r } + +// true + +type SearchTransformVariant interface { + SearchTransformCaster() *SearchTransform +} + +func (s *SearchTransform) SearchTransformCaster() *SearchTransform { + return s +} diff --git a/typedapi/types/security.go b/typedapi/types/security.go index f630b42ec6..0061a234af 100644 --- a/typedapi/types/security.go +++ b/typedapi/types/security.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Security type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L434-L447 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L444-L457 type Security struct { Anonymous FeatureToggle `json:"anonymous"` ApiKeyService FeatureToggle `json:"api_key_service"` @@ -166,9 +166,11 @@ func (s *Security) UnmarshalJSON(data []byte) error { // NewSecurity returns a Security. func NewSecurity() *Security { r := &Security{ - Realms: make(map[string]XpackRealm, 0), - RoleMapping: make(map[string]XpackRoleMapping, 0), + Realms: make(map[string]XpackRealm), + RoleMapping: make(map[string]XpackRoleMapping), } return r } + +// false diff --git a/typedapi/types/securityrolemapping.go b/typedapi/types/securityrolemapping.go index 5cb804024d..c5684e453f 100644 --- a/typedapi/types/securityrolemapping.go +++ b/typedapi/types/securityrolemapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SecurityRoleMapping type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/RoleMapping.ts#L25-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/RoleMapping.ts#L25-L33 type SecurityRoleMapping struct { Enabled bool `json:"enabled"` Metadata Metadata `json:"metadata"` @@ -100,3 +100,5 @@ func NewSecurityRoleMapping() *SecurityRoleMapping { return r } + +// false diff --git a/typedapi/types/securityroles.go b/typedapi/types/securityroles.go index 27047e10e2..3f90b214e6 100644 --- a/typedapi/types/securityroles.go +++ b/typedapi/types/securityroles.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // SecurityRoles type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L296-L300 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L306-L310 type SecurityRoles struct { Dls SecurityRolesDls `json:"dls"` File SecurityRolesFile `json:"file"` @@ -35,3 +35,5 @@ func NewSecurityRoles() *SecurityRoles { return r } + +// false diff --git a/typedapi/types/securityrolesdls.go b/typedapi/types/securityrolesdls.go index cb24fe8b78..00c3b51a8a 100644 --- a/typedapi/types/securityrolesdls.go +++ b/typedapi/types/securityrolesdls.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // SecurityRolesDls type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L308-L310 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L318-L320 type SecurityRolesDls struct { BitSetCache SecurityRolesDlsBitSetCache `json:"bit_set_cache"` } @@ -33,3 +33,5 @@ func NewSecurityRolesDls() *SecurityRolesDls { return r } + +// false diff --git a/typedapi/types/securityrolesdlsbitsetcache.go b/typedapi/types/securityrolesdlsbitsetcache.go index a9b859f7da..9426886ed3 100644 --- a/typedapi/types/securityrolesdlsbitsetcache.go +++ b/typedapi/types/securityrolesdlsbitsetcache.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SecurityRolesDlsBitSetCache type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L312-L316 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L322-L326 type SecurityRolesDlsBitSetCache struct { Count int `json:"count"` Memory ByteSize `json:"memory,omitempty"` @@ -90,3 +90,5 @@ func NewSecurityRolesDlsBitSetCache() *SecurityRolesDlsBitSetCache { return r } + +// false diff --git a/typedapi/types/securityrolesfile.go b/typedapi/types/securityrolesfile.go index ac8f25ed4b..f9a153a3fa 100644 --- a/typedapi/types/securityrolesfile.go +++ b/typedapi/types/securityrolesfile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SecurityRolesFile type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L318-L322 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L328-L332 type SecurityRolesFile struct { Dls bool `json:"dls"` Fls bool `json:"fls"` @@ -107,3 +107,5 @@ func NewSecurityRolesFile() *SecurityRolesFile { return r } + +// false diff --git a/typedapi/types/securityrolesnative.go b/typedapi/types/securityrolesnative.go index 1a64ab6dd2..cd0d32a1e7 100644 --- a/typedapi/types/securityrolesnative.go +++ b/typedapi/types/securityrolesnative.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SecurityRolesNative type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L302-L306 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L312-L316 type SecurityRolesNative struct { Dls bool `json:"dls"` Fls bool `json:"fls"` @@ -107,3 +107,5 @@ func NewSecurityRolesNative() *SecurityRolesNative { return r } + +// false diff --git a/typedapi/types/securitysettings.go b/typedapi/types/securitysettings.go new file mode 100644 index 0000000000..71aef358aa --- /dev/null +++ b/typedapi/types/securitysettings.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +// SecuritySettings type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/SecuritySettings.ts#L22-L24 +type SecuritySettings struct { + Index *IndexSettings `json:"index,omitempty"` +} + +// NewSecuritySettings returns a SecuritySettings. +func NewSecuritySettings() *SecuritySettings { + r := &SecuritySettings{} + + return r +} + +// true + +type SecuritySettingsVariant interface { + SecuritySettingsCaster() *SecuritySettings +} + +func (s *SecuritySettings) SecuritySettingsCaster() *SecuritySettings { + return s +} diff --git a/typedapi/types/segment.go b/typedapi/types/segment.go index 60b492cf5e..1ad60362db 100644 --- a/typedapi/types/segment.go +++ b/typedapi/types/segment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Segment type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/segments/types.ts#L28-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/segments/types.ts#L28-L38 type Segment struct { Attributes map[string]string `json:"attributes"` Committed bool `json:"committed"` @@ -184,8 +184,10 @@ func (s *Segment) UnmarshalJSON(data []byte) error { // NewSegment returns a Segment. 
func NewSegment() *Segment { r := &Segment{ - Attributes: make(map[string]string, 0), + Attributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/segmentsrecord.go b/typedapi/types/segmentsrecord.go index 9fd7504aa4..c66fe6c594 100644 --- a/typedapi/types/segmentsrecord.go +++ b/typedapi/types/segmentsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SegmentsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/segments/types.ts#L22-L107 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/segments/types.ts#L22-L107 type SegmentsRecord struct { // Committed If `true`, the segment is synced to disk. // Segments that are synced can survive a hard reboot. @@ -259,3 +259,5 @@ func NewSegmentsRecord() *SegmentsRecord { return r } + +// false diff --git a/typedapi/types/segmentsstats.go b/typedapi/types/segmentsstats.go index 598d97e8e2..81a49f7894 100644 --- a/typedapi/types/segmentsstats.go +++ b/typedapi/types/segmentsstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SegmentsStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L273-L366 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L276-L369 type SegmentsStats struct { // Count Total number of segments across all shards assigned to selected nodes. Count int `json:"count"` @@ -381,8 +381,10 @@ func (s *SegmentsStats) UnmarshalJSON(data []byte) error { // NewSegmentsStats returns a SegmentsStats. func NewSegmentsStats() *SegmentsStats { r := &SegmentsStats{ - FileSizes: make(map[string]ShardFileSizeInfo, 0), + FileSizes: make(map[string]ShardFileSizeInfo), } return r } + +// false diff --git a/typedapi/types/selectoption.go b/typedapi/types/selectoption.go index e6cca2aebd..45d7c7d7b0 100644 --- a/typedapi/types/selectoption.go +++ b/typedapi/types/selectoption.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SelectOption type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L25-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L25-L28 type SelectOption struct { Label string `json:"label"` Value ScalarValue `json:"value"` @@ -80,3 +80,13 @@ func NewSelectOption() *SelectOption { return r } + +// true + +type SelectOptionVariant interface { + SelectOptionCaster() *SelectOption +} + +func (s *SelectOption) SelectOptionCaster() *SelectOption { + return s +} diff --git a/typedapi/types/semanticquery.go b/typedapi/types/semanticquery.go index 70cd67c851..8ee07248f8 100644 --- a/typedapi/types/semanticquery.go +++ b/typedapi/types/semanticquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SemanticQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/SemanticQuery.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/SemanticQuery.ts#L22-L30 type SemanticQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -124,3 +124,13 @@ func NewSemanticQuery() *SemanticQuery { return r } + +// true + +type SemanticQueryVariant interface { + SemanticQueryCaster() *SemanticQuery +} + +func (s *SemanticQuery) SemanticQueryCaster() *SemanticQuery { + return s +} diff --git a/typedapi/types/semantictextproperty.go b/typedapi/types/semantictextproperty.go index 9af76a1a2f..1c9c430df3 100644 --- a/typedapi/types/semantictextproperty.go +++ b/typedapi/types/semantictextproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,11 +30,22 @@ import ( // SemanticTextProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L206-L210 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L210-L226 type SemanticTextProperty struct { - InferenceId string `json:"inference_id"` + // InferenceId Inference endpoint that will be used to generate embeddings for the field. + // This parameter cannot be updated. Use the Create inference API to create the + // endpoint. + // If `search_inference_id` is specified, the inference endpoint will only be + // used at index time. + InferenceId *string `json:"inference_id,omitempty"` Meta map[string]string `json:"meta,omitempty"` - Type string `json:"type,omitempty"` + // SearchInferenceId Inference endpoint that will be used to generate embeddings at query time. + // You can update this parameter by using the Update mapping API. Use the Create + // inference API to create the endpoint. 
+ // If not specified, the inference endpoint defined by inference_id will be used + // at both index and query time. + SearchInferenceId *string `json:"search_inference_id,omitempty"` + Type string `json:"type,omitempty"` } func (s *SemanticTextProperty) UnmarshalJSON(data []byte) error { @@ -65,6 +76,11 @@ func (s *SemanticTextProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Meta", err) } + case "search_inference_id": + if err := dec.Decode(&s.SearchInferenceId); err != nil { + return fmt.Errorf("%s | %w", "SearchInferenceId", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -79,9 +95,10 @@ func (s *SemanticTextProperty) UnmarshalJSON(data []byte) error { func (s SemanticTextProperty) MarshalJSON() ([]byte, error) { type innerSemanticTextProperty SemanticTextProperty tmp := innerSemanticTextProperty{ - InferenceId: s.InferenceId, - Meta: s.Meta, - Type: s.Type, + InferenceId: s.InferenceId, + Meta: s.Meta, + SearchInferenceId: s.SearchInferenceId, + Type: s.Type, } tmp.Type = "semantic_text" @@ -92,8 +109,18 @@ func (s SemanticTextProperty) MarshalJSON() ([]byte, error) { // NewSemanticTextProperty returns a SemanticTextProperty. func NewSemanticTextProperty() *SemanticTextProperty { r := &SemanticTextProperty{ - Meta: make(map[string]string, 0), + Meta: make(map[string]string), } return r } + +// true + +type SemanticTextPropertyVariant interface { + SemanticTextPropertyCaster() *SemanticTextProperty +} + +func (s *SemanticTextProperty) SemanticTextPropertyCaster() *SemanticTextProperty { + return s +} diff --git a/typedapi/types/serbiananalyzer.go b/typedapi/types/serbiananalyzer.go new file mode 100644 index 0000000000..10cf4831de --- /dev/null +++ b/typedapi/types/serbiananalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SerbianAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L262-L267 +type SerbianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *SerbianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SerbianAnalyzer) MarshalJSON() ([]byte, error) { + type innerSerbianAnalyzer SerbianAnalyzer + tmp := innerSerbianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "serbian" + + 
return json.Marshal(tmp) +} + +// NewSerbianAnalyzer returns a SerbianAnalyzer. +func NewSerbianAnalyzer() *SerbianAnalyzer { + r := &SerbianAnalyzer{} + + return r +} + +// true + +type SerbianAnalyzerVariant interface { + SerbianAnalyzerCaster() *SerbianAnalyzer +} + +func (s *SerbianAnalyzer) SerbianAnalyzerCaster() *SerbianAnalyzer { + return s +} diff --git a/typedapi/types/serialdifferencingaggregation.go b/typedapi/types/serialdifferencingaggregation.go index 519dadfa25..f9e2f9a1e1 100644 --- a/typedapi/types/serialdifferencingaggregation.go +++ b/typedapi/types/serialdifferencingaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // SerialDifferencingAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L361-L367 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L399-L408 type SerialDifferencingAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -112,3 +112,13 @@ func NewSerialDifferencingAggregation() *SerialDifferencingAggregation { return r } + +// true + +type SerialDifferencingAggregationVariant interface { + SerialDifferencingAggregationCaster() *SerialDifferencingAggregation +} + +func (s *SerialDifferencingAggregation) SerialDifferencingAggregationCaster() *SerialDifferencingAggregation { + return s +} diff --git a/typedapi/types/serializedclusterstate.go b/typedapi/types/serializedclusterstate.go index f9661af5d7..28ed741cc8 100644 --- a/typedapi/types/serializedclusterstate.go +++ b/typedapi/types/serializedclusterstate.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // SerializedClusterState type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L232-L238 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L232-L238 type SerializedClusterState struct { Diffs *SerializedClusterStateDetail `json:"diffs,omitempty"` // FullStates Number of published cluster states. @@ -35,3 +35,5 @@ func NewSerializedClusterState() *SerializedClusterState { return r } + +// false diff --git a/typedapi/types/serializedclusterstatedetail.go b/typedapi/types/serializedclusterstatedetail.go index ce45d1cef6..f203723493 100644 --- a/typedapi/types/serializedclusterstatedetail.go +++ b/typedapi/types/serializedclusterstatedetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SerializedClusterStateDetail type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L240-L246 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L240-L246 type SerializedClusterStateDetail struct { CompressedSize *string `json:"compressed_size,omitempty"` CompressedSizeInBytes *int64 `json:"compressed_size_in_bytes,omitempty"` @@ -135,3 +135,5 @@ func NewSerializedClusterStateDetail() *SerializedClusterStateDetail { return r } + +// false diff --git a/typedapi/types/servicetoken.go b/typedapi/types/servicetoken.go index 1bbd8e34fc..2c486f14cf 100644 --- a/typedapi/types/servicetoken.go +++ b/typedapi/types/servicetoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ServiceToken type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/create_service_token/types.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/create_service_token/types.ts#L22-L25 type ServiceToken struct { Name string `json:"name"` Value string `json:"value"` @@ -80,3 +80,5 @@ func NewServiceToken() *ServiceToken { return r } + +// false diff --git a/typedapi/types/setpriorityaction.go b/typedapi/types/setpriorityaction.go index 4f44138316..f303391a3e 100644 --- a/typedapi/types/setpriorityaction.go +++ b/typedapi/types/setpriorityaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SetPriorityAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Phase.ts#L98-L100 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Phase.ts#L95-L97 type SetPriorityAction struct { Priority *int `json:"priority,omitempty"` } @@ -78,3 +78,13 @@ func NewSetPriorityAction() *SetPriorityAction { return r } + +// true + +type SetPriorityActionVariant interface { + SetPriorityActionCaster() *SetPriorityAction +} + +func (s *SetPriorityAction) SetPriorityActionCaster() *SetPriorityAction { + return s +} diff --git a/typedapi/types/setprocessor.go b/typedapi/types/setprocessor.go index ceacffd6b6..a26c93da3a 100644 --- a/typedapi/types/setprocessor.go +++ b/typedapi/types/setprocessor.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SetProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L1027-L1061 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1426-L1460 type SetProcessor struct { // CopyFrom The origin field which will be copied to `field`, cannot set `value` // simultaneously. @@ -207,3 +207,13 @@ func NewSetProcessor() *SetProcessor { return r } + +// true + +type SetProcessorVariant interface { + SetProcessorCaster() *SetProcessor +} + +func (s *SetProcessor) SetProcessorCaster() *SetProcessor { + return s +} diff --git a/typedapi/types/setsecurityuserprocessor.go b/typedapi/types/setsecurityuserprocessor.go index e6cd1ad0c9..d49e78189c 100644 --- a/typedapi/types/setsecurityuserprocessor.go +++ b/typedapi/types/setsecurityuserprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SetSecurityUserProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L1063-L1072 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1462-L1471 type SetSecurityUserProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -142,3 +142,13 @@ func NewSetSecurityUserProcessor() *SetSecurityUserProcessor { return r } + +// true + +type SetSecurityUserProcessorVariant interface { + SetSecurityUserProcessorCaster() *SetSecurityUserProcessor +} + +func (s *SetSecurityUserProcessor) SetSecurityUserProcessorCaster() *SetSecurityUserProcessor { + return s +} diff --git a/typedapi/types/settings.go b/typedapi/types/settings.go index ca000085a0..5c531c3e0f 100644 --- a/typedapi/types/settings.go +++ b/typedapi/types/settings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Settings type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/_types/Transform.ts#L98-L144 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/_types/Transform.ts#L98-L144 type Settings struct { // AlignCheckpoints Specifies whether the transform checkpoint ranges should be optimized for // performance. 
Such optimization can align @@ -182,3 +182,13 @@ func NewSettings() *Settings { return r } + +// true + +type SettingsVariant interface { + SettingsCaster() *Settings +} + +func (s *Settings) SettingsCaster() *Settings { + return s +} diff --git a/typedapi/types/settingsanalyze.go b/typedapi/types/settingsanalyze.go index 57f604f062..0f2d010456 100644 --- a/typedapi/types/settingsanalyze.go +++ b/typedapi/types/settingsanalyze.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SettingsAnalyze type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L235-L238 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L243-L246 type SettingsAnalyze struct { MaxTokenCount Stringifiedinteger `json:"max_token_count,omitempty"` } @@ -66,3 +66,13 @@ func NewSettingsAnalyze() *SettingsAnalyze { return r } + +// true + +type SettingsAnalyzeVariant interface { + SettingsAnalyzeCaster() *SettingsAnalyze +} + +func (s *SettingsAnalyze) SettingsAnalyzeCaster() *SettingsAnalyze { + return s +} diff --git a/typedapi/types/settingshighlight.go b/typedapi/types/settingshighlight.go index c9aff73435..b4fee8992e 100644 --- a/typedapi/types/settingshighlight.go +++ b/typedapi/types/settingshighlight.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SettingsHighlight type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L230-L233 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L238-L241 type SettingsHighlight struct { MaxAnalyzedOffset *int `json:"max_analyzed_offset,omitempty"` } @@ -78,3 +78,13 @@ func NewSettingsHighlight() *SettingsHighlight { return r } + +// true + +type SettingsHighlightVariant interface { + SettingsHighlightCaster() *SettingsHighlight +} + +func (s *SettingsHighlight) SettingsHighlightCaster() *SettingsHighlight { + return s +} diff --git a/typedapi/types/settingsquerystring.go b/typedapi/types/settingsquerystring.go index 4a3de37939..fb8d9aff06 100644 --- a/typedapi/types/settingsquerystring.go +++ b/typedapi/types/settingsquerystring.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SettingsQueryString type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L250-L252 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L258-L260 type SettingsQueryString struct { Lenient Stringifiedboolean `json:"lenient"` } @@ -66,3 +66,13 @@ func NewSettingsQueryString() *SettingsQueryString { return r } + +// true + +type SettingsQueryStringVariant interface { + SettingsQueryStringCaster() *SettingsQueryString +} + +func (s *SettingsQueryString) SettingsQueryStringCaster() *SettingsQueryString { + return s +} diff --git a/typedapi/types/settingssearch.go b/typedapi/types/settingssearch.go index 473b58a317..9b6eb4aeb1 100644 --- a/typedapi/types/settingssearch.go +++ b/typedapi/types/settingssearch.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // SettingsSearch type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L240-L243 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L248-L251 type SettingsSearch struct { Idle *SearchIdle `json:"idle,omitempty"` Slowlog *SlowlogSettings `json:"slowlog,omitempty"` @@ -34,3 +34,13 @@ func NewSettingsSearch() *SettingsSearch { return r } + +// true + +type SettingsSearchVariant interface { + SettingsSearchCaster() *SettingsSearch +} + +func (s *SettingsSearch) SettingsSearchCaster() *SettingsSearch { + return s +} diff --git a/typedapi/types/settingssimilarity.go b/typedapi/types/settingssimilarity.go index 458137d05e..d5f61e9981 100644 --- a/typedapi/types/settingssimilarity.go +++ b/typedapi/types/settingssimilarity.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,5 +31,9 @@ package types // SettingsSimilarityLmj // SettingsSimilarityScripted // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L171-L182 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L178-L190 type SettingsSimilarity any + +type SettingsSimilarityVariant interface { + SettingsSimilarityCaster() *SettingsSimilarity +} diff --git a/typedapi/types/settingssimilaritybm25.go b/typedapi/types/settingssimilaritybm25.go index 7fac335ca3..5358ca3a21 100644 --- a/typedapi/types/settingssimilaritybm25.go +++ b/typedapi/types/settingssimilaritybm25.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SettingsSimilarityBm25 type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L188-L193 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L196-L201 type SettingsSimilarityBm25 struct { B *Float64 `json:"b,omitempty"` DiscountOverlaps *bool `json:"discount_overlaps,omitempty"` @@ -131,3 +131,13 @@ func NewSettingsSimilarityBm25() *SettingsSimilarityBm25 { return r } + +// true + +type SettingsSimilarityBm25Variant interface { + SettingsSimilarityBm25Caster() *SettingsSimilarityBm25 +} + +func (s *SettingsSimilarityBm25) SettingsSimilarityBm25Caster() *SettingsSimilarityBm25 { + return s +} diff --git a/typedapi/types/settingssimilarityboolean.go b/typedapi/types/settingssimilarityboolean.go index 8944f0a892..4f378dafee 100644 --- a/typedapi/types/settingssimilarityboolean.go +++ b/typedapi/types/settingssimilarityboolean.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // SettingsSimilarityBoolean type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L184-L186 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L192-L194 type SettingsSimilarityBoolean struct { Type string `json:"type,omitempty"` } @@ -49,3 +49,13 @@ func NewSettingsSimilarityBoolean() *SettingsSimilarityBoolean { return r } + +// true + +type SettingsSimilarityBooleanVariant interface { + SettingsSimilarityBooleanCaster() *SettingsSimilarityBoolean +} + +func (s *SettingsSimilarityBoolean) SettingsSimilarityBooleanCaster() *SettingsSimilarityBoolean { + return s +} diff --git a/typedapi/types/settingssimilaritydfi.go b/typedapi/types/settingssimilaritydfi.go index d7cd71720f..93232955bc 100644 --- a/typedapi/types/settingssimilaritydfi.go +++ b/typedapi/types/settingssimilaritydfi.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,7 +28,7 @@ import ( // SettingsSimilarityDfi type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L195-L198 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L203-L206 type SettingsSimilarityDfi struct { IndependenceMeasure dfiindependencemeasure.DFIIndependenceMeasure `json:"independence_measure"` Type string `json:"type,omitempty"` @@ -53,3 +53,13 @@ func NewSettingsSimilarityDfi() *SettingsSimilarityDfi { return r } + +// true + +type SettingsSimilarityDfiVariant interface { + SettingsSimilarityDfiCaster() *SettingsSimilarityDfi +} + +func (s *SettingsSimilarityDfi) SettingsSimilarityDfiCaster() *SettingsSimilarityDfi { + return s +} diff --git a/typedapi/types/settingssimilaritydfr.go b/typedapi/types/settingssimilaritydfr.go index ae50183fa8..14927ca937 100644 --- a/typedapi/types/settingssimilaritydfr.go +++ b/typedapi/types/settingssimilaritydfr.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SettingsSimilarityDfr type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L200-L205 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L208-L213 type SettingsSimilarityDfr struct { AfterEffect dfraftereffect.DFRAfterEffect `json:"after_effect"` BasicModel dfrbasicmodel.DFRBasicModel `json:"basic_model"` @@ -59,3 +59,13 @@ func NewSettingsSimilarityDfr() *SettingsSimilarityDfr { return r } + +// true + +type SettingsSimilarityDfrVariant interface { + SettingsSimilarityDfrCaster() *SettingsSimilarityDfr +} + +func (s *SettingsSimilarityDfr) SettingsSimilarityDfrCaster() *SettingsSimilarityDfr { + return s +} diff --git a/typedapi/types/settingssimilarityib.go b/typedapi/types/settingssimilarityib.go index fae7305011..66272fabe6 100644 --- a/typedapi/types/settingssimilarityib.go +++ b/typedapi/types/settingssimilarityib.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SettingsSimilarityIb type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L207-L212 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L215-L220 type SettingsSimilarityIb struct { Distribution ibdistribution.IBDistribution `json:"distribution"` Lambda iblambda.IBLambda `json:"lambda"` @@ -59,3 +59,13 @@ func NewSettingsSimilarityIb() *SettingsSimilarityIb { return r } + +// true + +type SettingsSimilarityIbVariant interface { + SettingsSimilarityIbCaster() *SettingsSimilarityIb +} + +func (s *SettingsSimilarityIb) SettingsSimilarityIbCaster() *SettingsSimilarityIb { + return s +} diff --git a/typedapi/types/settingssimilaritylmd.go b/typedapi/types/settingssimilaritylmd.go index 94fb16e47e..ce7907b031 100644 --- a/typedapi/types/settingssimilaritylmd.go +++ b/typedapi/types/settingssimilaritylmd.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SettingsSimilarityLmd type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L214-L217 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L222-L225 type SettingsSimilarityLmd struct { Mu *Float64 `json:"mu,omitempty"` Type string `json:"type,omitempty"` @@ -97,3 +97,13 @@ func NewSettingsSimilarityLmd() *SettingsSimilarityLmd { return r } + +// true + +type SettingsSimilarityLmdVariant interface { + SettingsSimilarityLmdCaster() *SettingsSimilarityLmd +} + +func (s *SettingsSimilarityLmd) SettingsSimilarityLmdCaster() *SettingsSimilarityLmd { + return s +} diff --git a/typedapi/types/settingssimilaritylmj.go b/typedapi/types/settingssimilaritylmj.go index fa9a4febf9..e92c9ffe5b 100644 --- a/typedapi/types/settingssimilaritylmj.go +++ b/typedapi/types/settingssimilaritylmj.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SettingsSimilarityLmj type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L219-L222 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L227-L230 type SettingsSimilarityLmj struct { Lambda *Float64 `json:"lambda,omitempty"` Type string `json:"type,omitempty"` @@ -97,3 +97,13 @@ func NewSettingsSimilarityLmj() *SettingsSimilarityLmj { return r } + +// true + +type SettingsSimilarityLmjVariant interface { + SettingsSimilarityLmjCaster() *SettingsSimilarityLmj +} + +func (s *SettingsSimilarityLmj) SettingsSimilarityLmjCaster() *SettingsSimilarityLmj { + return s +} diff --git a/typedapi/types/settingssimilarityscripted.go b/typedapi/types/settingssimilarityscripted.go index efccd1e559..055268b69d 100644 --- a/typedapi/types/settingssimilarityscripted.go +++ b/typedapi/types/settingssimilarityscripted.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // SettingsSimilarityScripted type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L224-L228 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L232-L236 type SettingsSimilarityScripted struct { Script Script `json:"script"` Type string `json:"type,omitempty"` @@ -53,3 +53,13 @@ func NewSettingsSimilarityScripted() *SettingsSimilarityScripted { return r } + +// true + +type SettingsSimilarityScriptedVariant interface { + SettingsSimilarityScriptedCaster() *SettingsSimilarityScripted +} + +func (s *SettingsSimilarityScripted) SettingsSimilarityScriptedCaster() *SettingsSimilarityScripted { + return s +} diff --git a/typedapi/types/shapefieldquery.go b/typedapi/types/shapefieldquery.go index cecee434ad..b39da8f8ff 100644 --- a/typedapi/types/shapefieldquery.go +++ b/typedapi/types/shapefieldquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // ShapeFieldQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L366-L379 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L383-L396 type ShapeFieldQuery struct { // IndexedShape Queries using a pre-indexed shape. 
IndexedShape *FieldLookup `json:"indexed_shape,omitempty"` @@ -84,3 +84,13 @@ func NewShapeFieldQuery() *ShapeFieldQuery { return r } + +// true + +type ShapeFieldQueryVariant interface { + ShapeFieldQueryCaster() *ShapeFieldQuery +} + +func (s *ShapeFieldQuery) ShapeFieldQueryCaster() *ShapeFieldQuery { + return s +} diff --git a/typedapi/types/shapeproperty.go b/typedapi/types/shapeproperty.go index f7e10af989..9b4687ecd1 100644 --- a/typedapi/types/shapeproperty.go +++ b/typedapi/types/shapeproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,11 +30,12 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // ShapeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/geo.ts#L73-L85 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/geo.ts#L73-L85 type ShapeProperty struct { Coerce *bool `json:"coerce,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -45,12 +46,12 @@ type ShapeProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` IgnoreZValue *bool `json:"ignore_z_value,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Orientation *geoorientation.GeoOrientation `json:"orientation,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Orientation *geoorientation.GeoOrientation `json:"orientation,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *ShapeProperty) UnmarshalJSON(data []byte) error { @@ -136,301 +137,313 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -512,318 +525,318 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := 
NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -838,6 +851,11 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -852,20 +870,20 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { func (s ShapeProperty) MarshalJSON() ([]byte, error) { type innerShapeProperty ShapeProperty tmp := innerShapeProperty{ - Coerce: s.Coerce, - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - IgnoreMalformed: s.IgnoreMalformed, - IgnoreZValue: s.IgnoreZValue, - Meta: s.Meta, - Orientation: s.Orientation, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, 
- Type: s.Type, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Meta: s.Meta, + Orientation: s.Orientation, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "shape" @@ -876,10 +894,20 @@ func (s ShapeProperty) MarshalJSON() ([]byte, error) { // NewShapeProperty returns a ShapeProperty. func NewShapeProperty() *ShapeProperty { r := &ShapeProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type ShapePropertyVariant interface { + ShapePropertyCaster() *ShapeProperty +} + +func (s *ShapeProperty) ShapePropertyCaster() *ShapeProperty { + return s +} diff --git a/typedapi/types/shapequery.go b/typedapi/types/shapequery.go index 4e7ab807c8..c0c57e4368 100644 --- a/typedapi/types/shapequery.go +++ b/typedapi/types/shapequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShapeQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L351-L364 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L367-L381 type ShapeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -153,8 +153,18 @@ func (s ShapeQuery) MarshalJSON() ([]byte, error) { // NewShapeQuery returns a ShapeQuery. func NewShapeQuery() *ShapeQuery { r := &ShapeQuery{ - ShapeQuery: make(map[string]ShapeFieldQuery, 0), + ShapeQuery: make(map[string]ShapeFieldQuery), } return r } + +// true + +type ShapeQueryVariant interface { + ShapeQueryCaster() *ShapeQuery +} + +func (s *ShapeQuery) ShapeQueryCaster() *ShapeQuery { + return s +} diff --git a/typedapi/types/shardcommit.go b/typedapi/types/shardcommit.go index 8e1de2b364..97c8bdaac0 100644 --- a/typedapi/types/shardcommit.go +++ b/typedapi/types/shardcommit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardCommit type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L112-L117 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L112-L117 type ShardCommit struct { Generation int `json:"generation"` Id string `json:"id"` @@ -106,8 +106,10 @@ func (s *ShardCommit) UnmarshalJSON(data []byte) error { // NewShardCommit returns a ShardCommit. 
func NewShardCommit() *ShardCommit { r := &ShardCommit{ - UserData: make(map[string]string, 0), + UserData: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/shardfailure.go b/typedapi/types/shardfailure.go index 06a24d1ab2..47391508c9 100644 --- a/typedapi/types/shardfailure.go +++ b/typedapi/types/shardfailure.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardFailure type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Errors.ts#L52-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Errors.ts#L52-L58 type ShardFailure struct { Index *string `json:"index,omitempty"` Node *string `json:"node,omitempty"` @@ -116,3 +116,5 @@ func NewShardFailure() *ShardFailure { return r } + +// false diff --git a/typedapi/types/shardfilesizeinfo.go b/typedapi/types/shardfilesizeinfo.go index 13b9c0e9c9..bf2c60871a 100644 --- a/typedapi/types/shardfilesizeinfo.go +++ b/typedapi/types/shardfilesizeinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardFileSizeInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L124-L131 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L124-L131 type ShardFileSizeInfo struct { AverageSizeInBytes *int64 `json:"average_size_in_bytes,omitempty"` Count *int64 `json:"count,omitempty"` @@ -154,3 +154,5 @@ func NewShardFileSizeInfo() *ShardFileSizeInfo { return r } + +// false diff --git a/typedapi/types/shardhealthstats.go b/typedapi/types/shardhealthstats.go index 34de5057a5..93019d8924 100644 --- a/typedapi/types/shardhealthstats.go +++ b/typedapi/types/shardhealthstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,14 +33,15 @@ import ( // ShardHealthStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/health/types.ts#L36-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/health/types.ts#L37-L45 type ShardHealthStats struct { - ActiveShards int `json:"active_shards"` - InitializingShards int `json:"initializing_shards"` - PrimaryActive bool `json:"primary_active"` - RelocatingShards int `json:"relocating_shards"` - Status healthstatus.HealthStatus `json:"status"` - UnassignedShards int `json:"unassigned_shards"` + ActiveShards int `json:"active_shards"` + InitializingShards int `json:"initializing_shards"` + PrimaryActive bool `json:"primary_active"` + RelocatingShards int `json:"relocating_shards"` + Status healthstatus.HealthStatus `json:"status"` + UnassignedPrimaryShards int `json:"unassigned_primary_shards"` + UnassignedShards int `json:"unassigned_shards"` } func (s *ShardHealthStats) UnmarshalJSON(data []byte) error { @@ -125,6 +126,22 @@ func (s *ShardHealthStats) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Status", err) } + case "unassigned_primary_shards": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "UnassignedPrimaryShards", err) + } + s.UnassignedPrimaryShards = value + case float64: + f := int(v) + s.UnassignedPrimaryShards = f + } + case "unassigned_shards": var tmp any @@ -152,3 +169,5 @@ func NewShardHealthStats() *ShardHealthStats { return r } + +// false diff --git a/typedapi/types/shardlease.go b/typedapi/types/shardlease.go index 9c583ffb93..e300d02480 100644 --- a/typedapi/types/shardlease.go +++ b/typedapi/types/shardlease.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardLease type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L133-L138 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L133-L138 type ShardLease struct { Id string `json:"id"` RetainingSeqNo int64 `json:"retaining_seq_no"` @@ -102,3 +102,5 @@ func NewShardLease() *ShardLease { return r } + +// false diff --git a/typedapi/types/shardmigrationstatus.go b/typedapi/types/shardmigrationstatus.go index a718f6221a..7d90135570 100644 --- a/typedapi/types/shardmigrationstatus.go +++ b/typedapi/types/shardmigrationstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // ShardMigrationStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L52-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L52-L54 type ShardMigrationStatus struct { Status shutdownstatus.ShutdownStatus `json:"status"` } @@ -37,3 +37,5 @@ func NewShardMigrationStatus() *ShardMigrationStatus { return r } + +// false diff --git a/typedapi/types/shardpath.go b/typedapi/types/shardpath.go index bcdaef1859..bcd2c8c231 100644 --- a/typedapi/types/shardpath.go +++ b/typedapi/types/shardpath.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardPath type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L140-L144 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L140-L144 type ShardPath struct { DataPath string `json:"data_path"` IsCustomDataPath bool `json:"is_custom_data_path"` @@ -102,3 +102,5 @@ func NewShardPath() *ShardPath { return r } + +// false diff --git a/typedapi/types/shardprofile.go b/typedapi/types/shardprofile.go index 3b3702d8d7..5ec3d87518 100644 --- a/typedapi/types/shardprofile.go +++ b/typedapi/types/shardprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,12 +31,17 @@ import ( // ShardProfile type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/profile.ts#L132-L137 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/profile.ts#L142-L152 type ShardProfile struct { Aggregations []AggregationProfile `json:"aggregations"` + Cluster string `json:"cluster"` + Dfs *DfsProfile `json:"dfs,omitempty"` Fetch *FetchProfile `json:"fetch,omitempty"` Id string `json:"id"` + Index string `json:"index"` + NodeId string `json:"node_id"` Searches []SearchProfile `json:"searches"` + ShardId int64 `json:"shard_id"` } func (s *ShardProfile) UnmarshalJSON(data []byte) error { @@ -59,6 +64,23 @@ func (s *ShardProfile) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Aggregations", err) } + case "cluster": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Cluster", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Cluster = o + + case "dfs": + if err := dec.Decode(&s.Dfs); err != nil { + return fmt.Errorf("%s | %w", "Dfs", err) + } + case "fetch": if err := dec.Decode(&s.Fetch); err != nil { return fmt.Errorf("%s | %w", "Fetch", err) @@ -76,11 +98,36 @@ func (s *ShardProfile) UnmarshalJSON(data []byte) error { } s.Id = o + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + case "searches": if err := dec.Decode(&s.Searches); err != 
nil { return fmt.Errorf("%s | %w", "Searches", err) } + case "shard_id": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ShardId", err) + } + s.ShardId = value + case float64: + f := int64(v) + s.ShardId = f + } + } } return nil @@ -92,3 +139,5 @@ func NewShardProfile() *ShardProfile { return r } + +// false diff --git a/typedapi/types/shardquerycache.go b/typedapi/types/shardquerycache.go index 6108690e73..0a49c303e7 100644 --- a/typedapi/types/shardquerycache.go +++ b/typedapi/types/shardquerycache.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardQueryCache type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L146-L154 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L146-L154 type ShardQueryCache struct { CacheCount int64 `json:"cache_count"` CacheSize int64 `json:"cache_size"` @@ -173,3 +173,5 @@ func NewShardQueryCache() *ShardQueryCache { return r } + +// false diff --git a/typedapi/types/shardrecovery.go b/typedapi/types/shardrecovery.go index a3ba13286d..ee4ba45f98 100644 --- a/typedapi/types/shardrecovery.go +++ b/typedapi/types/shardrecovery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardRecovery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/recovery/types.ts#L118-L135 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/recovery/types.ts#L118-L135 type ShardRecovery struct { Id int64 `json:"id"` Index RecoveryIndexStatus `json:"index"` @@ -190,3 +190,5 @@ func NewShardRecovery() *ShardRecovery { return r } + +// false diff --git a/typedapi/types/shardretentionleases.go b/typedapi/types/shardretentionleases.go index c59b8cd028..303dd11fcb 100644 --- a/typedapi/types/shardretentionleases.go +++ b/typedapi/types/shardretentionleases.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardRetentionLeases type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L156-L160 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L156-L160 type ShardRetentionLeases struct { Leases []ShardLease `json:"leases"` PrimaryTerm int64 `json:"primary_term"` @@ -89,3 +89,5 @@ func NewShardRetentionLeases() *ShardRetentionLeases { return r } + +// false diff --git a/typedapi/types/shardrouting.go b/typedapi/types/shardrouting.go index a8ff479b16..c31d26e140 100644 --- a/typedapi/types/shardrouting.go +++ b/typedapi/types/shardrouting.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // ShardRouting type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L162-L167 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L162-L167 type ShardRouting struct { Node string `json:"node"` Primary bool `json:"primary"` @@ -110,3 +110,5 @@ func NewShardRouting() *ShardRouting { return r } + +// false diff --git a/typedapi/types/shardsavailabilityindicator.go b/typedapi/types/shardsavailabilityindicator.go index a58bbc5a40..fd77530071 100644 --- a/typedapi/types/shardsavailabilityindicator.go +++ b/typedapi/types/shardsavailabilityindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // ShardsAvailabilityIndicator type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L104-L108 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L105-L109 type ShardsAvailabilityIndicator struct { Details *ShardsAvailabilityIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewShardsAvailabilityIndicator() *ShardsAvailabilityIndicator { return r } + +// false diff --git a/typedapi/types/shardsavailabilityindicatordetails.go b/typedapi/types/shardsavailabilityindicatordetails.go index a10c3e7aa6..ff2f9170d4 100644 --- a/typedapi/types/shardsavailabilityindicatordetails.go +++ b/typedapi/types/shardsavailabilityindicatordetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardsAvailabilityIndicatorDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L109-L120 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L110-L121 type ShardsAvailabilityIndicatorDetails struct { CreatingPrimaries int64 `json:"creating_primaries"` CreatingReplicas int64 `json:"creating_replicas"` @@ -221,3 +221,5 @@ func NewShardsAvailabilityIndicatorDetails() *ShardsAvailabilityIndicatorDetails return r } + +// false diff --git a/typedapi/types/shardscapacityindicator.go b/typedapi/types/shardscapacityindicator.go index 462df65b9f..0df645eeda 100644 --- a/typedapi/types/shardscapacityindicator.go +++ b/typedapi/types/shardscapacityindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // ShardsCapacityIndicator type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L173-L177 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L190-L194 type ShardsCapacityIndicator struct { Details *ShardsCapacityIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewShardsCapacityIndicator() *ShardsCapacityIndicator { return r } + +// false diff --git a/typedapi/types/shardscapacityindicatordetails.go b/typedapi/types/shardscapacityindicatordetails.go index 17c2df6599..83ede320ff 100644 --- a/typedapi/types/shardscapacityindicatordetails.go +++ b/typedapi/types/shardscapacityindicatordetails.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ShardsCapacityIndicatorDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L179-L182 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L196-L199 type ShardsCapacityIndicatorDetails struct { Data ShardsCapacityIndicatorTierDetail `json:"data"` Frozen ShardsCapacityIndicatorTierDetail `json:"frozen"` @@ -34,3 +34,5 @@ func NewShardsCapacityIndicatorDetails() *ShardsCapacityIndicatorDetails { return r } + +// false diff --git a/typedapi/types/shardscapacityindicatortierdetail.go b/typedapi/types/shardscapacityindicatortierdetail.go index 4014afd777..bfb62842b7 100644 --- a/typedapi/types/shardscapacityindicatortierdetail.go +++ b/typedapi/types/shardscapacityindicatortierdetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardsCapacityIndicatorTierDetail type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L184-L187 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L201-L204 type ShardsCapacityIndicatorTierDetail struct { CurrentUsedShards *int `json:"current_used_shards,omitempty"` MaxShardsInCluster int `json:"max_shards_in_cluster"` @@ -95,3 +95,5 @@ func NewShardsCapacityIndicatorTierDetail() *ShardsCapacityIndicatorTierDetail { return r } + +// false diff --git a/typedapi/types/shardsegmentrouting.go b/typedapi/types/shardsegmentrouting.go index bbfe394c95..ec9390543b 100644 --- a/typedapi/types/shardsegmentrouting.go +++ b/typedapi/types/shardsegmentrouting.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardSegmentRouting type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/segments/types.ts#L40-L44 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/segments/types.ts#L40-L44 type ShardSegmentRouting struct { Node string `json:"node"` Primary bool `json:"primary"` @@ -102,3 +102,5 @@ func NewShardSegmentRouting() *ShardSegmentRouting { return r } + +// false diff --git a/typedapi/types/shardsequencenumber.go b/typedapi/types/shardsequencenumber.go index 17a5f0943b..4284e03436 100644 --- a/typedapi/types/shardsequencenumber.go +++ b/typedapi/types/shardsequencenumber.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardSequenceNumber type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L176-L180 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L176-L180 type ShardSequenceNumber struct { GlobalCheckpoint int64 `json:"global_checkpoint"` LocalCheckpoint int64 `json:"local_checkpoint"` @@ -99,3 +99,5 @@ func NewShardSequenceNumber() *ShardSequenceNumber { return r } + +// false diff --git a/typedapi/types/shardsrecord.go b/typedapi/types/shardsrecord.go index da93d5f995..bb778943f9 100644 --- a/typedapi/types/shardsrecord.go +++ b/typedapi/types/shardsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/shards/types.ts#L20-L421 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/shards/types.ts#L20-L427 type ShardsRecord struct { // BulkAvgSizeInBytes The average size in bytes of shard bulk operations. 
BulkAvgSizeInBytes *string `json:"bulk.avg_size_in_bytes,omitempty"` @@ -45,6 +45,8 @@ type ShardsRecord struct { BulkTotalTime *string `json:"bulk.total_time,omitempty"` // CompletionSize The size of completion. CompletionSize *string `json:"completion.size,omitempty"` + // Dataset total size of dataset (including the cache for partially mounted indices) + Dataset *string `json:"dataset,omitempty"` // Docs The number of documents in the shard. Docs *string `json:"docs,omitempty"` // FielddataEvictions The fielddata cache evictions. @@ -314,6 +316,18 @@ func (s *ShardsRecord) UnmarshalJSON(data []byte) error { } s.CompletionSize = &o + case "dataset": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Dataset", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Dataset = &o + case "docs", "d", "dc": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { @@ -1165,3 +1179,5 @@ func NewShardsRecord() *ShardsRecord { return r } + +// false diff --git a/typedapi/types/shardssegment.go b/typedapi/types/shardssegment.go index 51f22f6fc2..9fe2466c09 100644 --- a/typedapi/types/shardssegment.go +++ b/typedapi/types/shardssegment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardsSegment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/segments/types.ts#L46-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/segments/types.ts#L46-L51 type ShardsSegment struct { NumCommittedSegments int `json:"num_committed_segments"` NumSearchSegments int `json:"num_search_segments"` @@ -107,8 +107,10 @@ func (s *ShardsSegment) UnmarshalJSON(data []byte) error { // NewShardsSegment returns a ShardsSegment. func NewShardsSegment() *ShardsSegment { r := &ShardsSegment{ - Segments: make(map[string]Segment, 0), + Segments: make(map[string]Segment), } return r } + +// false diff --git a/typedapi/types/shardsstatssummary.go b/typedapi/types/shardsstatssummary.go index eeb97a9fdc..176c8105bf 100644 --- a/typedapi/types/shardsstatssummary.go +++ b/typedapi/types/shardsstatssummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // ShardsStatsSummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotShardsStatus.ts#L29-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotShardsStatus.ts#L29-L35 type ShardsStatsSummary struct { Incremental ShardsStatsSummaryItem `json:"incremental"` StartTimeInMillis int64 `json:"start_time_in_millis"` @@ -90,3 +90,5 @@ func NewShardsStatsSummary() *ShardsStatsSummary { return r } + +// false diff --git a/typedapi/types/shardsstatssummaryitem.go b/typedapi/types/shardsstatssummaryitem.go index ebad79c027..b6262095de 100644 --- a/typedapi/types/shardsstatssummaryitem.go +++ b/typedapi/types/shardsstatssummaryitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardsStatsSummaryItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotShardsStatus.ts#L37-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotShardsStatus.ts#L37-L40 type ShardsStatsSummaryItem struct { FileCount int64 `json:"file_count"` SizeInBytes int64 `json:"size_in_bytes"` @@ -93,3 +93,5 @@ func NewShardsStatsSummaryItem() *ShardsStatsSummaryItem { return r } + +// false diff --git a/typedapi/types/shardstatistics.go b/typedapi/types/shardstatistics.go index 5be6757889..3a18967770 100644 --- a/typedapi/types/shardstatistics.go +++ b/typedapi/types/shardstatistics.go @@ -16,20 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ShardStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L54-L66 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L54-L69 type ShardStatistics struct { + // Failed The number of shards the operation or search attempted to run on but failed. Failed uint `json:"failed"` Failures []ShardFailure `json:"failures,omitempty"` Skipped *uint `json:"skipped,omitempty"` - // Successful Indicates how many shards have successfully run the search. + // Successful The number of shards the operation or search succeeded on. Successful uint `json:"successful"` - // Total Indicates how many shards the search will run on overall. + // Total The number of shards the operation or search will run on overall. Total uint `json:"total"` } @@ -39,3 +40,5 @@ func NewShardStatistics() *ShardStatistics { return r } + +// false diff --git a/typedapi/types/shardstore.go b/typedapi/types/shardstore.go index 68f5e3a86f..ef5e7b2f50 100644 --- a/typedapi/types/shardstore.go +++ b/typedapi/types/shardstore.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // ShardStore type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/shard_stores/types.ts#L30-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/shard_stores/types.ts#L29-L36 type ShardStore struct { Allocation shardstoreallocation.ShardStoreAllocation `json:"allocation"` AllocationId *string `json:"allocation_id,omitempty"` @@ -120,8 +120,10 @@ func (s ShardStore) MarshalJSON() ([]byte, error) { // NewShardStore returns a ShardStore. func NewShardStore() *ShardStore { r := &ShardStore{ - ShardStore: make(map[string]ShardStoreNode, 0), + ShardStore: make(map[string]ShardStoreNode), } return r } + +// false diff --git a/typedapi/types/shardstoreexception.go b/typedapi/types/shardstoreexception.go index d0a16536ee..1486f36645 100644 --- a/typedapi/types/shardstoreexception.go +++ b/typedapi/types/shardstoreexception.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardStoreException type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/shard_stores/types.ts#L54-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/shard_stores/types.ts#L53-L56 type ShardStoreException struct { Reason string `json:"reason"` Type string `json:"type"` @@ -87,3 +87,5 @@ func NewShardStoreException() *ShardStoreException { return r } + +// false diff --git a/typedapi/types/shardstoreindex.go b/typedapi/types/shardstoreindex.go index ea53311d96..4d6486415d 100644 --- a/typedapi/types/shardstoreindex.go +++ b/typedapi/types/shardstoreindex.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ShardStoreIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search_shards/SearchShardsResponse.ts#L33-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search_shards/SearchShardsResponse.ts#L62-L65 type ShardStoreIndex struct { Aliases []string `json:"aliases,omitempty"` Filter *Query `json:"filter,omitempty"` @@ -34,3 +34,5 @@ func NewShardStoreIndex() *ShardStoreIndex { return r } + +// false diff --git a/typedapi/types/shardstorenode.go b/typedapi/types/shardstorenode.go index 84ab24691e..975b9e9e36 100644 --- a/typedapi/types/shardstorenode.go +++ b/typedapi/types/shardstorenode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardStoreNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/shard_stores/types.ts#L39-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/shard_stores/types.ts#L38-L45 type ShardStoreNode struct { Attributes map[string]string `json:"attributes"` EphemeralId *string `json:"ephemeral_id,omitempty"` @@ -111,8 +111,10 @@ func (s *ShardStoreNode) UnmarshalJSON(data []byte) error { // NewShardStoreNode returns a ShardStoreNode. func NewShardStoreNode() *ShardStoreNode { r := &ShardStoreNode{ - Attributes: make(map[string]string, 0), + Attributes: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/shardstorewrapper.go b/typedapi/types/shardstorewrapper.go index 2222491da6..69800d26a0 100644 --- a/typedapi/types/shardstorewrapper.go +++ b/typedapi/types/shardstorewrapper.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // ShardStoreWrapper type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/shard_stores/types.ts#L59-L61 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/shard_stores/types.ts#L58-L60 type ShardStoreWrapper struct { Stores []ShardStore `json:"stores"` } @@ -33,3 +33,5 @@ func NewShardStoreWrapper() *ShardStoreWrapper { return r } + +// false diff --git a/typedapi/types/shardstotalstats.go b/typedapi/types/shardstotalstats.go index 66180f5234..625b5311ae 100644 --- a/typedapi/types/shardstotalstats.go +++ b/typedapi/types/shardstotalstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShardsTotalStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/stats/types.ts#L182-L184 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/stats/types.ts#L182-L184 type ShardsTotalStats struct { TotalCount int64 `json:"total_count"` } @@ -77,3 +77,5 @@ func NewShardsTotalStats() *ShardsTotalStats { return r } + +// false diff --git a/typedapi/types/shared.go b/typedapi/types/shared.go index f169c48660..7c9c1b113f 100644 --- a/typedapi/types/shared.go +++ b/typedapi/types/shared.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Shared type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/searchable_snapshots/cache_stats/Response.ts#L34-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/searchable_snapshots/cache_stats/Response.ts#L34-L43 type Shared struct { BytesReadInBytes ByteSize `json:"bytes_read_in_bytes"` BytesWrittenInBytes ByteSize `json:"bytes_written_in_bytes"` @@ -150,3 +150,5 @@ func NewShared() *Shared { return r } + +// false diff --git a/typedapi/types/sharedfilesystemrepository.go b/typedapi/types/sharedfilesystemrepository.go index 1b267a99e0..fdaf223b20 100644 --- a/typedapi/types/sharedfilesystemrepository.go +++ b/typedapi/types/sharedfilesystemrepository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SharedFileSystemRepository type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotRepository.ts#L55-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotRepository.ts#L55-L58 type SharedFileSystemRepository struct { Settings SharedFileSystemRepositorySettings `json:"settings"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewSharedFileSystemRepository() *SharedFileSystemRepository { return r } + +// true + +type SharedFileSystemRepositoryVariant interface { + SharedFileSystemRepositoryCaster() *SharedFileSystemRepository +} + +func (s *SharedFileSystemRepository) SharedFileSystemRepositoryCaster() *SharedFileSystemRepository { + return s +} diff --git a/typedapi/types/sharedfilesystemrepositorysettings.go b/typedapi/types/sharedfilesystemrepositorysettings.go index 38825b6f93..6a912ffa3e 100644 --- a/typedapi/types/sharedfilesystemrepositorysettings.go +++ b/typedapi/types/sharedfilesystemrepositorysettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SharedFileSystemRepositorySettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotRepository.ts#L104-L108 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotRepository.ts#L104-L108 type SharedFileSystemRepositorySettings struct { ChunkSize ByteSize `json:"chunk_size,omitempty"` Compress *bool `json:"compress,omitempty"` @@ -139,3 +139,13 @@ func NewSharedFileSystemRepositorySettings() *SharedFileSystemRepositorySettings return r } + +// true + +type SharedFileSystemRepositorySettingsVariant interface { + SharedFileSystemRepositorySettingsCaster() *SharedFileSystemRepositorySettings +} + +func (s *SharedFileSystemRepositorySettings) SharedFileSystemRepositorySettingsCaster() *SharedFileSystemRepositorySettings { + return s +} diff --git a/typedapi/types/shingletokenfilter.go b/typedapi/types/shingletokenfilter.go index fc782f6ebb..fb49efd714 100644 --- a/typedapi/types/shingletokenfilter.go +++ b/typedapi/types/shingletokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShingleTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L87-L95 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L86-L94 type ShingleTokenFilter struct { FillerToken *string `json:"filler_token,omitempty"` MaxShingleSize string `json:"max_shingle_size,omitempty"` @@ -174,3 +174,13 @@ func NewShingleTokenFilter() *ShingleTokenFilter { return r } + +// true + +type ShingleTokenFilterVariant interface { + ShingleTokenFilterCaster() *ShingleTokenFilter +} + +func (s *ShingleTokenFilter) ShingleTokenFilterCaster() *ShingleTokenFilter { + return s +} diff --git a/typedapi/types/shortnumberproperty.go b/typedapi/types/shortnumberproperty.go index 29ebe45744..a0daa2a809 100644 --- a/typedapi/types/shortnumberproperty.go +++ b/typedapi/types/shortnumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // ShortNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L167-L170 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L171-L174 type ShortNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,13 +48,13 @@ type ShortNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *int `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *int `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. 
TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -162,301 +163,313 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case 
"version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = 
oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", 
err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -543,301 +556,313 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := 
NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -848,18 +873,6 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Script", err) } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -874,6 +887,11 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -921,8 +939,8 @@ func (s ShortNumberProperty) MarshalJSON() ([]byte, error) { OnScriptError: s.OnScriptError, Properties: s.Properties, Script: s.Script, - Similarity: s.Similarity, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -936,10 +954,20 @@ func (s ShortNumberProperty) MarshalJSON() ([]byte, error) { // NewShortNumberProperty returns a ShortNumberProperty. 
func NewShortNumberProperty() *ShortNumberProperty { r := &ShortNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type ShortNumberPropertyVariant interface { + ShortNumberPropertyCaster() *ShortNumberProperty +} + +func (s *ShortNumberProperty) ShortNumberPropertyCaster() *ShortNumberProperty { + return s +} diff --git a/typedapi/types/shrinkaction.go b/typedapi/types/shrinkaction.go index add7899bad..0516d0b20d 100644 --- a/typedapi/types/shrinkaction.go +++ b/typedapi/types/shrinkaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ShrinkAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Phase.ts#L120-L124 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Phase.ts#L117-L121 type ShrinkAction struct { AllowWriteAfterShrink *bool `json:"allow_write_after_shrink,omitempty"` MaxPrimaryShardSize ByteSize `json:"max_primary_shard_size,omitempty"` @@ -99,3 +99,13 @@ func NewShrinkAction() *ShrinkAction { return r } + +// true + +type ShrinkActionVariant interface { + ShrinkActionCaster() *ShrinkAction +} + +func (s *ShrinkAction) ShrinkActionCaster() *ShrinkAction { + return s +} diff --git a/typedapi/types/significantlongtermsaggregate.go b/typedapi/types/significantlongtermsaggregate.go index 9e8816ffb0..9cb8fc32ee 100644 --- a/typedapi/types/significantlongtermsaggregate.go +++ b/typedapi/types/significantlongtermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SignificantLongTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L592-L594 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L668-L670 type SignificantLongTermsAggregate struct { BgCount *int64 `json:"bg_count,omitempty"` Buckets BucketsSignificantLongTermsBucket `json:"buckets"` @@ -121,3 +121,5 @@ func NewSignificantLongTermsAggregate() *SignificantLongTermsAggregate { return r } + +// false diff --git a/typedapi/types/significantlongtermsbucket.go b/typedapi/types/significantlongtermsbucket.go index 9f215034d1..b26ebe6fea 100644 --- a/typedapi/types/significantlongtermsbucket.go +++ b/typedapi/types/significantlongtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // SignificantLongTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L601-L604 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L677-L680 type SignificantLongTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` BgCount int64 `json:"bg_count"` @@ -547,6 +547,13 @@ func (s *SignificantLongTermsBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -680,8 +687,10 @@ func (s SignificantLongTermsBucket) MarshalJSON() ([]byte, error) { // NewSignificantLongTermsBucket returns a SignificantLongTermsBucket. func NewSignificantLongTermsBucket() *SignificantLongTermsBucket { r := &SignificantLongTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/significantstringtermsaggregate.go b/typedapi/types/significantstringtermsaggregate.go index 4d54c792a7..4b3ca9a11a 100644 --- a/typedapi/types/significantstringtermsaggregate.go +++ b/typedapi/types/significantstringtermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SignificantStringTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L606-L608 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L682-L684 type SignificantStringTermsAggregate struct { BgCount *int64 `json:"bg_count,omitempty"` Buckets BucketsSignificantStringTermsBucket `json:"buckets"` @@ -121,3 +121,5 @@ func NewSignificantStringTermsAggregate() *SignificantStringTermsAggregate { return r } + +// false diff --git a/typedapi/types/significantstringtermsbucket.go b/typedapi/types/significantstringtermsbucket.go index dc03092e33..bfa4e18678 100644 --- a/typedapi/types/significantstringtermsbucket.go +++ b/typedapi/types/significantstringtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // SignificantStringTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L610-L612 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L686-L688 type SignificantStringTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` BgCount int64 `json:"bg_count"` @@ -531,6 +531,13 @@ func (s *SignificantStringTermsBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -664,8 +671,10 @@ func (s SignificantStringTermsBucket) MarshalJSON() ([]byte, error) { // NewSignificantStringTermsBucket returns a SignificantStringTermsBucket. func NewSignificantStringTermsBucket() *SignificantStringTermsBucket { r := &SignificantStringTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/significanttermsaggregatebasesignificantlongtermsbucket.go b/typedapi/types/significanttermsaggregatebasesignificantlongtermsbucket.go deleted file mode 100644 index aaeb6f914d..0000000000 --- a/typedapi/types/significanttermsaggregatebasesignificantlongtermsbucket.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// SignificantTermsAggregateBaseSignificantLongTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L585-L590 -type SignificantTermsAggregateBaseSignificantLongTermsBucket struct { - BgCount *int64 `json:"bg_count,omitempty"` - Buckets BucketsSignificantLongTermsBucket `json:"buckets"` - DocCount *int64 `json:"doc_count,omitempty"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *SignificantTermsAggregateBaseSignificantLongTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "bg_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "BgCount", err) - } - s.BgCount = &value - case float64: - f := int64(v) - s.BgCount = &f - } - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]SignificantLongTermsBucket, 
0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []SignificantLongTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCount", err) - } - s.DocCount = &value - case float64: - f := int64(v) - s.DocCount = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewSignificantTermsAggregateBaseSignificantLongTermsBucket returns a SignificantTermsAggregateBaseSignificantLongTermsBucket. -func NewSignificantTermsAggregateBaseSignificantLongTermsBucket() *SignificantTermsAggregateBaseSignificantLongTermsBucket { - r := &SignificantTermsAggregateBaseSignificantLongTermsBucket{} - - return r -} diff --git a/typedapi/types/significanttermsaggregatebasesignificantstringtermsbucket.go b/typedapi/types/significanttermsaggregatebasesignificantstringtermsbucket.go deleted file mode 100644 index e2abf3b080..0000000000 --- a/typedapi/types/significanttermsaggregatebasesignificantstringtermsbucket.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// SignificantTermsAggregateBaseSignificantStringTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L585-L590 -type SignificantTermsAggregateBaseSignificantStringTermsBucket struct { - BgCount *int64 `json:"bg_count,omitempty"` - Buckets BucketsSignificantStringTermsBucket `json:"buckets"` - DocCount *int64 `json:"doc_count,omitempty"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *SignificantTermsAggregateBaseSignificantStringTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "bg_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "BgCount", err) - } - s.BgCount = &value - case float64: - f := int64(v) - s.BgCount = &f - } - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := 
make(map[string]SignificantStringTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []SignificantStringTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCount", err) - } - s.DocCount = &value - case float64: - f := int64(v) - s.DocCount = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewSignificantTermsAggregateBaseSignificantStringTermsBucket returns a SignificantTermsAggregateBaseSignificantStringTermsBucket. -func NewSignificantTermsAggregateBaseSignificantStringTermsBucket() *SignificantTermsAggregateBaseSignificantStringTermsBucket { - r := &SignificantTermsAggregateBaseSignificantStringTermsBucket{} - - return r -} diff --git a/typedapi/types/significanttermsaggregatebasevoid.go b/typedapi/types/significanttermsaggregatebasevoid.go deleted file mode 100644 index 7500247fa7..0000000000 --- a/typedapi/types/significanttermsaggregatebasevoid.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// SignificantTermsAggregateBaseVoid type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L585-L590 -type SignificantTermsAggregateBaseVoid struct { - BgCount *int64 `json:"bg_count,omitempty"` - Buckets BucketsVoid `json:"buckets"` - DocCount *int64 `json:"doc_count,omitempty"` - Meta Metadata `json:"meta,omitempty"` -} - -func (s *SignificantTermsAggregateBaseVoid) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "bg_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "BgCount", err) - } - s.BgCount = &value - case float64: - f := int64(v) - s.BgCount = &f - } - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]any, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets 
= o - case '[': - o := []any{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCount", err) - } - s.DocCount = &value - case float64: - f := int64(v) - s.DocCount = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - } - } - return nil -} - -// NewSignificantTermsAggregateBaseVoid returns a SignificantTermsAggregateBaseVoid. -func NewSignificantTermsAggregateBaseVoid() *SignificantTermsAggregateBaseVoid { - r := &SignificantTermsAggregateBaseVoid{} - - return r -} diff --git a/typedapi/types/significanttermsaggregation.go b/typedapi/types/significanttermsaggregation.go index 9c4856c46c..c809ae754a 100644 --- a/typedapi/types/significanttermsaggregation.go +++ b/typedapi/types/significanttermsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // SignificantTermsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L772-L836 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L817-L884 type SignificantTermsAggregation struct { // BackgroundFilter A background filter that can be used to focus in on significant terms within // a narrower context, instead of the entire index. 
@@ -263,3 +263,13 @@ func NewSignificantTermsAggregation() *SignificantTermsAggregation { return r } + +// true + +type SignificantTermsAggregationVariant interface { + SignificantTermsAggregationCaster() *SignificantTermsAggregation +} + +func (s *SignificantTermsAggregation) SignificantTermsAggregationCaster() *SignificantTermsAggregation { + return s +} diff --git a/typedapi/types/significanttextaggregation.go b/typedapi/types/significanttextaggregation.go index 5c315fbaf7..8eb7dfa177 100644 --- a/typedapi/types/significanttextaggregation.go +++ b/typedapi/types/significanttextaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // SignificantTextAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L838-L910 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L886-L961 type SignificantTextAggregation struct { // BackgroundFilter A background filter that can be used to focus in on significant terms within // a narrower context, instead of the entire index. 
@@ -297,3 +297,13 @@ func NewSignificantTextAggregation() *SignificantTextAggregation { return r } + +// true + +type SignificantTextAggregationVariant interface { + SignificantTextAggregationCaster() *SignificantTextAggregation +} + +func (s *SignificantTextAggregation) SignificantTextAggregationCaster() *SignificantTextAggregation { + return s +} diff --git a/typedapi/types/simpleanalyzer.go b/typedapi/types/simpleanalyzer.go index cf5a4b9bfe..1d7bdfff2d 100644 --- a/typedapi/types/simpleanalyzer.go +++ b/typedapi/types/simpleanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SimpleAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/analyzers.ts#L83-L86 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L320-L323 type SimpleAnalyzer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewSimpleAnalyzer() *SimpleAnalyzer { return r } + +// true + +type SimpleAnalyzerVariant interface { + SimpleAnalyzerCaster() *SimpleAnalyzer +} + +func (s *SimpleAnalyzer) SimpleAnalyzerCaster() *SimpleAnalyzer { + return s +} diff --git a/typedapi/types/simplemovingaverageaggregation.go b/typedapi/types/simplemovingaverageaggregation.go index 75c2b4f779..5325b3b1f7 100644 --- a/typedapi/types/simplemovingaverageaggregation.go +++ b/typedapi/types/simplemovingaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // SimpleMovingAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L247-L250 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L273-L276 type SimpleMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -173,3 +173,13 @@ func NewSimpleMovingAverageAggregation() *SimpleMovingAverageAggregation { return r } + +// true + +type SimpleMovingAverageAggregationVariant interface { + SimpleMovingAverageAggregationCaster() *SimpleMovingAverageAggregation +} + +func (s *SimpleMovingAverageAggregation) SimpleMovingAverageAggregationCaster() *SimpleMovingAverageAggregation { + return s +} diff --git a/typedapi/types/simplepatternsplittokenizer.go b/typedapi/types/simplepatternsplittokenizer.go new file mode 100644 index 0000000000..e90e3bd387 --- /dev/null +++ b/typedapi/types/simplepatternsplittokenizer.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SimplePatternSplitTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L116-L119 +type SimplePatternSplitTokenizer struct { + Pattern *string `json:"pattern,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *SimplePatternSplitTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SimplePatternSplitTokenizer) MarshalJSON() ([]byte, error) { + type 
innerSimplePatternSplitTokenizer SimplePatternSplitTokenizer + tmp := innerSimplePatternSplitTokenizer{ + Pattern: s.Pattern, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "simple_pattern_split" + + return json.Marshal(tmp) +} + +// NewSimplePatternSplitTokenizer returns a SimplePatternSplitTokenizer. +func NewSimplePatternSplitTokenizer() *SimplePatternSplitTokenizer { + r := &SimplePatternSplitTokenizer{} + + return r +} + +// true + +type SimplePatternSplitTokenizerVariant interface { + SimplePatternSplitTokenizerCaster() *SimplePatternSplitTokenizer +} + +func (s *SimplePatternSplitTokenizer) SimplePatternSplitTokenizerCaster() *SimplePatternSplitTokenizer { + return s +} diff --git a/typedapi/types/simplepatterntokenizer.go b/typedapi/types/simplepatterntokenizer.go new file mode 100644 index 0000000000..4604994bba --- /dev/null +++ b/typedapi/types/simplepatterntokenizer.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SimplePatternTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L111-L114 +type SimplePatternTokenizer struct { + Pattern *string `json:"pattern,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *SimplePatternTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SimplePatternTokenizer) MarshalJSON() ([]byte, error) { + type innerSimplePatternTokenizer SimplePatternTokenizer + tmp := innerSimplePatternTokenizer{ + Pattern: s.Pattern, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "simple_pattern" + + return json.Marshal(tmp) +} + +// NewSimplePatternTokenizer returns a SimplePatternTokenizer. 
+func NewSimplePatternTokenizer() *SimplePatternTokenizer { + r := &SimplePatternTokenizer{} + + return r +} + +// true + +type SimplePatternTokenizerVariant interface { + SimplePatternTokenizerCaster() *SimplePatternTokenizer +} + +func (s *SimplePatternTokenizer) SimplePatternTokenizerCaster() *SimplePatternTokenizer { + return s +} diff --git a/typedapi/types/simplequerystringflags.go b/typedapi/types/simplequerystringflags.go index 546ade9661..80bd1fbcd1 100644 --- a/typedapi/types/simplequerystringflags.go +++ b/typedapi/types/simplequerystringflags.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // SimpleQueryStringFlags type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L702-L706 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L723-L727 type SimpleQueryStringFlags PipeSeparatedFlagsSimpleQueryStringFlag + +type SimpleQueryStringFlagsVariant interface { + SimpleQueryStringFlagsCaster() *SimpleQueryStringFlags +} diff --git a/typedapi/types/simplequerystringquery.go b/typedapi/types/simplequerystringquery.go index 865c697055..81f7fd88f6 100644 --- a/typedapi/types/simplequerystringquery.go +++ b/typedapi/types/simplequerystringquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // SimpleQueryStringQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/fulltext.ts#L765-L830 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/fulltext.ts#L786-L854 type SimpleQueryStringQuery struct { // AnalyzeWildcard If `true`, the query attempts to analyze wildcard terms in the query string. AnalyzeWildcard *bool `json:"analyze_wildcard,omitempty"` @@ -278,3 +278,13 @@ func NewSimpleQueryStringQuery() *SimpleQueryStringQuery { return r } + +// true + +type SimpleQueryStringQueryVariant interface { + SimpleQueryStringQueryCaster() *SimpleQueryStringQuery +} + +func (s *SimpleQueryStringQuery) SimpleQueryStringQueryCaster() *SimpleQueryStringQuery { + return s +} diff --git a/typedapi/types/simplevalueaggregate.go b/typedapi/types/simplevalueaggregate.go index dd325c67de..8b2897e7f2 100644 --- a/typedapi/types/simplevalueaggregate.go +++ b/typedapi/types/simplevalueaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SimpleValueAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L224-L225 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L238-L239 type SimpleValueAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewSimpleValueAggregate() *SimpleValueAggregate { return r } + +// false diff --git a/typedapi/types/simulatedactions.go b/typedapi/types/simulatedactions.go index e63707bc53..778799ebb0 100644 --- a/typedapi/types/simulatedactions.go +++ b/typedapi/types/simulatedactions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SimulatedActions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Action.ts#L96-L100 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Action.ts#L90-L94 type SimulatedActions struct { Actions []string `json:"actions"` All *SimulatedActions `json:"all,omitempty"` @@ -88,3 +88,13 @@ func NewSimulatedActions() *SimulatedActions { return r } + +// true + +type SimulatedActionsVariant interface { + SimulatedActionsCaster() *SimulatedActions +} + +func (s *SimulatedActions) SimulatedActionsCaster() *SimulatedActions { + return s +} diff --git a/typedapi/types/inferenceresult.go b/typedapi/types/simulatedocumentresult.go similarity index 52% rename from typedapi/types/inferenceresult.go rename to typedapi/types/simulatedocumentresult.go index 1c063229b4..bae5441697 100644 --- a/typedapi/types/inferenceresult.go +++ b/typedapi/types/simulatedocumentresult.go @@ -16,24 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types -// InferenceResult type. +// SimulateDocumentResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/_types/Results.ts#L79-L89 -type InferenceResult struct { - Completion []CompletionResult `json:"completion,omitempty"` - Rerank []RankedDocument `json:"rerank,omitempty"` - SparseEmbedding []SparseEmbeddingResult `json:"sparse_embedding,omitempty"` - TextEmbedding []TextEmbeddingResult `json:"text_embedding,omitempty"` - TextEmbeddingBytes []TextEmbeddingByteResult `json:"text_embedding_bytes,omitempty"` +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Simulation.ts#L46-L50 +type SimulateDocumentResult struct { + Doc *DocumentSimulation `json:"doc,omitempty"` + Error *ErrorCause `json:"error,omitempty"` + ProcessorResults []PipelineSimulation `json:"processor_results,omitempty"` } -// NewInferenceResult returns a InferenceResult. -func NewInferenceResult() *InferenceResult { - r := &InferenceResult{} +// NewSimulateDocumentResult returns a SimulateDocumentResult. +func NewSimulateDocumentResult() *SimulateDocumentResult { + r := &SimulateDocumentResult{} return r } + +// false diff --git a/typedapi/types/simulateingestdocumentresult.go b/typedapi/types/simulateingestdocumentresult.go new file mode 100644 index 0000000000..c7bd2eccad --- /dev/null +++ b/typedapi/types/simulateingestdocumentresult.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +// SimulateIngestDocumentResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/simulate/ingest/SimulateIngestResponse.ts#L31-L33 +type SimulateIngestDocumentResult struct { + Doc *IngestDocumentSimulation `json:"doc,omitempty"` +} + +// NewSimulateIngestDocumentResult returns a SimulateIngestDocumentResult. +func NewSimulateIngestDocumentResult() *SimulateIngestDocumentResult { + r := &SimulateIngestDocumentResult{} + + return r +} + +// false diff --git a/typedapi/types/sizefield.go b/typedapi/types/sizefield.go index 41120fbb25..832cd9ff07 100644 --- a/typedapi/types/sizefield.go +++ b/typedapi/types/sizefield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SizeField type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/meta-fields.ts#L54-L56 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/meta-fields.ts#L54-L56 type SizeField struct { Enabled bool `json:"enabled"` } @@ -76,3 +76,13 @@ func NewSizeField() *SizeField { return r } + +// true + +type SizeFieldVariant interface { + SizeFieldCaster() *SizeField +} + +func (s *SizeField) SizeFieldCaster() *SizeField { + return s +} diff --git a/typedapi/types/sizehttphistogram.go b/typedapi/types/sizehttphistogram.go new file mode 100644 index 0000000000..4bf6a8d6e4 --- /dev/null +++ b/typedapi/types/sizehttphistogram.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SizeHttpHistogram type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L714-L718 +type SizeHttpHistogram struct { + Count int64 `json:"count"` + GeBytes *int64 `json:"ge_bytes,omitempty"` + LtBytes *int64 `json:"lt_bytes,omitempty"` +} + +func (s *SizeHttpHistogram) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "ge_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "GeBytes", err) + } + s.GeBytes = &value + case float64: + f := int64(v) + s.GeBytes = &f + } + + case "lt_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LtBytes", err) + } + s.LtBytes = &value + case float64: + f := int64(v) + s.LtBytes = &f + } + + } + } + return nil +} + +// NewSizeHttpHistogram returns a SizeHttpHistogram. +func NewSizeHttpHistogram() *SizeHttpHistogram { + r := &SizeHttpHistogram{} + + return r +} + +// false diff --git a/typedapi/types/slackaction.go b/typedapi/types/slackaction.go index 4d077f8743..c0df938430 100644 --- a/typedapi/types/slackaction.go +++ b/typedapi/types/slackaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SlackAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L91-L94 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L91-L94 type SlackAction struct { Account *string `json:"account,omitempty"` Message SlackMessage `json:"message"` @@ -80,3 +80,13 @@ func NewSlackAction() *SlackAction { return r } + +// true + +type SlackActionVariant interface { + SlackActionCaster() *SlackAction +} + +func (s *SlackAction) SlackActionCaster() *SlackAction { + return s +} diff --git a/typedapi/types/slackattachment.go b/typedapi/types/slackattachment.go index da4fb1a1f9..40de23d558 100644 --- a/typedapi/types/slackattachment.go +++ b/typedapi/types/slackattachment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SlackAttachment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L101-L117 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L101-L117 type SlackAttachment struct { AuthorIcon *string `json:"author_icon,omitempty"` AuthorLink *string `json:"author_link,omitempty"` @@ -242,3 +242,13 @@ func NewSlackAttachment() *SlackAttachment { return r } + +// true + +type SlackAttachmentVariant interface { + SlackAttachmentCaster() *SlackAttachment +} + +func (s *SlackAttachment) SlackAttachmentCaster() *SlackAttachment { + return s +} diff --git a/typedapi/types/slackattachmentfield.go b/typedapi/types/slackattachmentfield.go index 4ce0ae98c9..16403a000d 100644 --- a/typedapi/types/slackattachmentfield.go +++ b/typedapi/types/slackattachmentfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SlackAttachmentField type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L119-L123 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L119-L123 type SlackAttachmentField struct { Int bool `json:"short"` Title string `json:"title"` @@ -102,3 +102,13 @@ func NewSlackAttachmentField() *SlackAttachmentField { return r } + +// true + +type SlackAttachmentFieldVariant interface { + SlackAttachmentFieldCaster() *SlackAttachmentField +} + +func (s *SlackAttachmentField) SlackAttachmentFieldCaster() *SlackAttachmentField { + return s +} diff --git a/typedapi/types/slackdynamicattachment.go b/typedapi/types/slackdynamicattachment.go index 3548d05e66..8b091a60c1 100644 --- a/typedapi/types/slackdynamicattachment.go +++ b/typedapi/types/slackdynamicattachment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SlackDynamicAttachment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L125-L128 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L125-L128 type SlackDynamicAttachment struct { AttachmentTemplate SlackAttachment `json:"attachment_template"` ListPath string `json:"list_path"` @@ -80,3 +80,13 @@ func NewSlackDynamicAttachment() *SlackDynamicAttachment { return r } + +// true + +type SlackDynamicAttachmentVariant interface { + SlackDynamicAttachmentCaster() *SlackDynamicAttachment +} + +func (s *SlackDynamicAttachment) SlackDynamicAttachmentCaster() *SlackDynamicAttachment { + return s +} diff --git a/typedapi/types/slackmessage.go b/typedapi/types/slackmessage.go index 0f3be52640..512f029b0e 100644 --- a/typedapi/types/slackmessage.go +++ b/typedapi/types/slackmessage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SlackMessage type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L130-L137 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L130-L137 type SlackMessage struct { Attachments []SlackAttachment `json:"attachments"` DynamicAttachments *SlackDynamicAttachment `json:"dynamic_attachments,omitempty"` @@ -118,3 +118,13 @@ func NewSlackMessage() *SlackMessage { return r } + +// true + +type SlackMessageVariant interface { + SlackMessageCaster() *SlackMessage +} + +func (s *SlackMessage) SlackMessageCaster() *SlackMessage { + return s +} diff --git a/typedapi/types/slackresult.go b/typedapi/types/slackresult.go index 5a058cb852..e0fd77bf0c 100644 --- a/typedapi/types/slackresult.go +++ b/typedapi/types/slackresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SlackResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L96-L99 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L96-L99 type SlackResult struct { Account *string `json:"account,omitempty"` Message SlackMessage `json:"message"` @@ -80,3 +80,5 @@ func NewSlackResult() *SlackResult { return r } + +// false diff --git a/typedapi/types/slicedscroll.go b/typedapi/types/slicedscroll.go index 6366a7f640..43bad7cb80 100644 --- a/typedapi/types/slicedscroll.go +++ b/typedapi/types/slicedscroll.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SlicedScroll type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/SlicedScroll.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/SlicedScroll.ts#L23-L27 type SlicedScroll struct { Field *string `json:"field,omitempty"` Id string `json:"id"` @@ -90,3 +90,13 @@ func NewSlicedScroll() *SlicedScroll { return r } + +// true + +type SlicedScrollVariant interface { + SlicedScrollCaster() *SlicedScroll +} + +func (s *SlicedScroll) SlicedScrollCaster() *SlicedScroll { + return s +} diff --git a/typedapi/types/slices.go b/typedapi/types/slices.go index 7d8c69b404..acd54ea852 100644 --- a/typedapi/types/slices.go +++ b/typedapi/types/slices.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // int // slicescalculation.SlicesCalculation // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L364-L369 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L368-L373 type Slices any diff --git a/typedapi/types/slm.go b/typedapi/types/slm.go index fc0ebf27a6..a76d8ff809 100644 --- a/typedapi/types/slm.go +++ b/typedapi/types/slm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Slm type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L449-L452 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L459-L462 type Slm struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -114,3 +114,5 @@ func NewSlm() *Slm { return r } + +// false diff --git a/typedapi/types/slmindicator.go b/typedapi/types/slmindicator.go index 4c137e5c69..2197e071ea 100644 --- a/typedapi/types/slmindicator.go +++ b/typedapi/types/slmindicator.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // SlmIndicator type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L157-L161 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L174-L178 type SlmIndicator struct { Details *SlmIndicatorDetails `json:"details,omitempty"` Diagnosis []Diagnosis `json:"diagnosis,omitempty"` @@ -100,3 +100,5 @@ func NewSlmIndicator() *SlmIndicator { return r } + +// false diff --git a/typedapi/types/slmindicatordetails.go b/typedapi/types/slmindicatordetails.go index de0ec36106..6eec6c74cb 100644 --- a/typedapi/types/slmindicatordetails.go +++ b/typedapi/types/slmindicatordetails.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // SlmIndicatorDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L162-L166 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L179-L183 type SlmIndicatorDetails struct { Policies int64 `json:"policies"` SlmStatus lifecycleoperationmode.LifecycleOperationMode `json:"slm_status"` @@ -91,3 +91,5 @@ func NewSlmIndicatorDetails() *SlmIndicatorDetails { return r } + +// false diff --git a/typedapi/types/slmindicatorunhealthypolicies.go b/typedapi/types/slmindicatorunhealthypolicies.go index b712f9015b..8bddfb677c 100644 --- a/typedapi/types/slmindicatorunhealthypolicies.go +++ b/typedapi/types/slmindicatorunhealthypolicies.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SlmIndicatorUnhealthyPolicies type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/health_report/types.ts#L168-L171 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L185-L188 type SlmIndicatorUnhealthyPolicies struct { Count int64 `json:"count"` InvocationsSinceLastSuccess map[string]int64 `json:"invocations_since_last_success,omitempty"` @@ -83,8 +83,10 @@ func (s *SlmIndicatorUnhealthyPolicies) UnmarshalJSON(data []byte) error { // NewSlmIndicatorUnhealthyPolicies returns a SlmIndicatorUnhealthyPolicies. 
func NewSlmIndicatorUnhealthyPolicies() *SlmIndicatorUnhealthyPolicies { r := &SlmIndicatorUnhealthyPolicies{ - InvocationsSinceLastSuccess: make(map[string]int64, 0), + InvocationsSinceLastSuccess: make(map[string]int64), } return r } + +// false diff --git a/typedapi/types/slmpolicy.go b/typedapi/types/slmpolicy.go index 04d102e3b3..f4de7face1 100644 --- a/typedapi/types/slmpolicy.go +++ b/typedapi/types/slmpolicy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SLMPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/_types/SnapshotLifecycle.ts#L76-L82 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/_types/SnapshotLifecycle.ts#L86-L92 type SLMPolicy struct { Config *Configuration `json:"config,omitempty"` Name string `json:"name"` @@ -98,3 +98,5 @@ func NewSLMPolicy() *SLMPolicy { return r } + +// false diff --git a/typedapi/types/slowlogsettings.go b/typedapi/types/slowlogsettings.go index 18bb905327..70d9ed5aac 100644 --- a/typedapi/types/slowlogsettings.go +++ b/typedapi/types/slowlogsettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SlowlogSettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L490-L495 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L515-L520 type SlowlogSettings struct { Level *string `json:"level,omitempty"` Reformat *bool `json:"reformat,omitempty"` @@ -112,3 +112,13 @@ func NewSlowlogSettings() *SlowlogSettings { return r } + +// true + +type SlowlogSettingsVariant interface { + SlowlogSettingsCaster() *SlowlogSettings +} + +func (s *SlowlogSettings) SlowlogSettingsCaster() *SlowlogSettings { + return s +} diff --git a/typedapi/types/slowlogtresholdlevels.go b/typedapi/types/slowlogtresholdlevels.go index d9b493f1cd..c50e31ec65 100644 --- a/typedapi/types/slowlogtresholdlevels.go +++ b/typedapi/types/slowlogtresholdlevels.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SlowlogTresholdLevels type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L502-L507 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L527-L532 type SlowlogTresholdLevels struct { Debug Duration `json:"debug,omitempty"` Info Duration `json:"info,omitempty"` @@ -84,3 +84,13 @@ func NewSlowlogTresholdLevels() *SlowlogTresholdLevels { return r } + +// true + +type SlowlogTresholdLevelsVariant interface { + SlowlogTresholdLevelsCaster() *SlowlogTresholdLevels +} + +func (s *SlowlogTresholdLevels) SlowlogTresholdLevelsCaster() *SlowlogTresholdLevels { + return s +} diff --git a/typedapi/types/slowlogtresholds.go b/typedapi/types/slowlogtresholds.go index 3d6f7e20d4..9c9e0de89b 100644 --- a/typedapi/types/slowlogtresholds.go +++ b/typedapi/types/slowlogtresholds.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // SlowlogTresholds type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L497-L500 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L522-L525 type SlowlogTresholds struct { Fetch *SlowlogTresholdLevels `json:"fetch,omitempty"` Query *SlowlogTresholdLevels `json:"query,omitempty"` @@ -34,3 +34,13 @@ func NewSlowlogTresholds() *SlowlogTresholds { return r } + +// true + +type SlowlogTresholdsVariant interface { + SlowlogTresholdsCaster() *SlowlogTresholds +} + +func (s *SlowlogTresholds) SlowlogTresholdsCaster() *SlowlogTresholds { + return s +} diff --git a/typedapi/types/smoothingmodelcontainer.go b/typedapi/types/smoothingmodelcontainer.go index c5f015c6d2..4e1e17525a 100644 --- a/typedapi/types/smoothingmodelcontainer.go +++ b/typedapi/types/smoothingmodelcontainer.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // SmoothingModelContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L445-L461 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L445-L461 type SmoothingModelContainer struct { + AdditionalSmoothingModelContainerProperty map[string]json.RawMessage `json:"-"` // Laplace A smoothing model that uses an additive smoothing where a constant (typically // `1.0` or smaller) is added to all counts to balance weights. 
Laplace *LaplaceSmoothingModel `json:"laplace,omitempty"` @@ -36,9 +42,50 @@ type SmoothingModelContainer struct { StupidBackoff *StupidBackoffSmoothingModel `json:"stupid_backoff,omitempty"` } +// MarshalJSON overrides marshalling for types with additional properties +func (s SmoothingModelContainer) MarshalJSON() ([]byte, error) { + type opt SmoothingModelContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalSmoothingModelContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalSmoothingModelContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewSmoothingModelContainer returns a SmoothingModelContainer. func NewSmoothingModelContainer() *SmoothingModelContainer { - r := &SmoothingModelContainer{} + r := &SmoothingModelContainer{ + AdditionalSmoothingModelContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type SmoothingModelContainerVariant interface { + SmoothingModelContainerCaster() *SmoothingModelContainer +} + +func (s *SmoothingModelContainer) SmoothingModelContainerCaster() *SmoothingModelContainer { + return s +} diff --git a/typedapi/types/snapshotindexstats.go b/typedapi/types/snapshotindexstats.go index d6a7cebc23..3905f44b82 100644 --- a/typedapi/types/snapshotindexstats.go +++ b/typedapi/types/snapshotindexstats.go @@ -16,13 +16,13 @@ // under the License. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // SnapshotIndexStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotIndexStats.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotIndexStats.ts#L25-L29 type SnapshotIndexStats struct { Shards map[string]SnapshotShardsStatus `json:"shards"` ShardsStats SnapshotShardsStats `json:"shards_stats"` @@ -32,8 +32,10 @@ type SnapshotIndexStats struct { // NewSnapshotIndexStats returns a SnapshotIndexStats. func NewSnapshotIndexStats() *SnapshotIndexStats { r := &SnapshotIndexStats{ - Shards: make(map[string]SnapshotShardsStatus, 0), + Shards: make(map[string]SnapshotShardsStatus), } return r } + +// false diff --git a/typedapi/types/snapshotinfo.go b/typedapi/types/snapshotinfo.go index 4b5d5764a1..3e3f0f2530 100644 --- a/typedapi/types/snapshotinfo.go +++ b/typedapi/types/snapshotinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SnapshotInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotInfo.ts#L41-L71 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotInfo.ts#L41-L71 type SnapshotInfo struct { DataStreams []string `json:"data_streams"` Duration Duration `json:"duration,omitempty"` @@ -210,8 +210,10 @@ func (s *SnapshotInfo) UnmarshalJSON(data []byte) error { // NewSnapshotInfo returns a SnapshotInfo. func NewSnapshotInfo() *SnapshotInfo { r := &SnapshotInfo{ - IndexDetails: make(map[string]IndexDetails, 0), + IndexDetails: make(map[string]IndexDetails), } return r } + +// false diff --git a/typedapi/types/snapshotlifecycle.go b/typedapi/types/snapshotlifecycle.go index 1656af0031..77ec67def7 100644 --- a/typedapi/types/snapshotlifecycle.go +++ b/typedapi/types/snapshotlifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,18 +30,22 @@ import ( // SnapshotLifecycle type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/_types/SnapshotLifecycle.ts#L38-L49 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/_types/SnapshotLifecycle.ts#L38-L59 type SnapshotLifecycle struct { - InProgress *InProgress `json:"in_progress,omitempty"` - LastFailure *Invocation `json:"last_failure,omitempty"` - LastSuccess *Invocation `json:"last_success,omitempty"` - ModifiedDate DateTime `json:"modified_date,omitempty"` - ModifiedDateMillis int64 `json:"modified_date_millis"` - NextExecution DateTime `json:"next_execution,omitempty"` - NextExecutionMillis int64 `json:"next_execution_millis"` - Policy SLMPolicy `json:"policy"` - Stats Statistics `json:"stats"` - Version int64 `json:"version"` + InProgress *InProgress `json:"in_progress,omitempty"` + LastFailure *Invocation `json:"last_failure,omitempty"` + LastSuccess *Invocation `json:"last_success,omitempty"` + // ModifiedDate The last time the policy was modified. + ModifiedDate DateTime `json:"modified_date,omitempty"` + ModifiedDateMillis int64 `json:"modified_date_millis"` + // NextExecution The next time the policy will run. + NextExecution DateTime `json:"next_execution,omitempty"` + NextExecutionMillis int64 `json:"next_execution_millis"` + Policy SLMPolicy `json:"policy"` + Stats Statistics `json:"stats"` + // Version The version of the snapshot policy. + // Only the latest version is stored and incremented when the policy is updated. 
+ Version int64 `json:"version"` } func (s *SnapshotLifecycle) UnmarshalJSON(data []byte) error { @@ -120,3 +124,5 @@ func NewSnapshotLifecycle() *SnapshotLifecycle { return r } + +// false diff --git a/typedapi/types/snapshotnodeinfo.go b/typedapi/types/snapshotnodeinfo.go new file mode 100644 index 0000000000..435e2189fb --- /dev/null +++ b/typedapi/types/snapshotnodeinfo.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SnapshotNodeInfo type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L110-L113 +type SnapshotNodeInfo struct { + Id string `json:"id"` + Name string `json:"name"` +} + +func (s *SnapshotNodeInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewSnapshotNodeInfo returns a SnapshotNodeInfo. +func NewSnapshotNodeInfo() *SnapshotNodeInfo { + r := &SnapshotNodeInfo{} + + return r +} + +// false diff --git a/typedapi/types/snapshotresponseitem.go b/typedapi/types/snapshotresponseitem.go index f7f3d4cd21..1264c9bfb1 100644 --- a/typedapi/types/snapshotresponseitem.go +++ b/typedapi/types/snapshotresponseitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SnapshotResponseItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/get/SnapshotGetResponse.ts#L44-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/get/SnapshotGetResponse.ts#L44-L48 type SnapshotResponseItem struct { Error *ErrorCause `json:"error,omitempty"` Repository string `json:"repository"` @@ -78,3 +78,5 @@ func NewSnapshotResponseItem() *SnapshotResponseItem { return r } + +// false diff --git a/typedapi/types/snapshotrestore.go b/typedapi/types/snapshotrestore.go index e26690878c..6e92cc4941 100644 --- a/typedapi/types/snapshotrestore.go +++ b/typedapi/types/snapshotrestore.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SnapshotRestore type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/restore/SnapshotRestoreResponse.ts#L27-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/restore/SnapshotRestoreResponse.ts#L30-L34 type SnapshotRestore struct { Indices []string `json:"indices"` Shards ShardStatistics `json:"shards"` @@ -86,3 +86,5 @@ func NewSnapshotRestore() *SnapshotRestore { return r } + +// false diff --git a/typedapi/types/snapshotshardfailure.go b/typedapi/types/snapshotshardfailure.go index 52bf0fc353..2195ce142a 100644 --- a/typedapi/types/snapshotshardfailure.go +++ b/typedapi/types/snapshotshardfailure.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,13 +31,14 @@ import ( // SnapshotShardFailure type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotShardFailure.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotShardFailure.ts#L22-L29 type SnapshotShardFailure struct { - Index string `json:"index"` - NodeId *string `json:"node_id,omitempty"` - Reason string `json:"reason"` - ShardId string `json:"shard_id"` - Status string `json:"status"` + Index string `json:"index"` + IndexUuid string `json:"index_uuid"` + NodeId *string `json:"node_id,omitempty"` + Reason string `json:"reason"` + ShardId string `json:"shard_id"` + Status string `json:"status"` } func (s *SnapshotShardFailure) UnmarshalJSON(data []byte) error { @@ -60,6 +61,11 @@ func (s *SnapshotShardFailure) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Index", err) } + case "index_uuid": + if err := dec.Decode(&s.IndexUuid); err != nil { + return fmt.Errorf("%s | %w", "IndexUuid", err) + } + case "node_id": if err := dec.Decode(&s.NodeId); err != nil { return fmt.Errorf("%s | %w", "NodeId", err) @@ -105,3 +111,5 @@ func NewSnapshotShardFailure() *SnapshotShardFailure { return r } + +// false diff --git a/typedapi/types/snapshotshardsstats.go b/typedapi/types/snapshotshardsstats.go index 1714ff19d9..6666a1b7f2 100644 --- a/typedapi/types/snapshotshardsstats.go +++ b/typedapi/types/snapshotshardsstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SnapshotShardsStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotShardsStats.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotShardsStats.ts#L22-L29 type SnapshotShardsStats struct { Done int64 `json:"done"` Failed int64 `json:"failed"` @@ -157,3 +157,5 @@ func NewSnapshotShardsStats() *SnapshotShardsStats { return r } + +// false diff --git a/typedapi/types/snapshotshardsstatus.go b/typedapi/types/snapshotshardsstatus.go index c1a21d7316..7be5793ca5 100644 --- a/typedapi/types/snapshotshardsstatus.go +++ b/typedapi/types/snapshotshardsstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // SnapshotShardsStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotShardsStatus.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotShardsStatus.ts#L24-L27 type SnapshotShardsStatus struct { Stage shardsstatsstage.ShardsStatsStage `json:"stage"` Stats ShardsStatsSummary `json:"stats"` @@ -38,3 +38,5 @@ func NewSnapshotShardsStatus() *SnapshotShardsStatus { return r } + +// false diff --git a/typedapi/types/snapshotsrecord.go b/typedapi/types/snapshotsrecord.go index 0004b0a44a..f81a2d6bb4 100644 --- a/typedapi/types/snapshotsrecord.go +++ b/typedapi/types/snapshotsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SnapshotsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/snapshots/types.ts#L24-L96 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/snapshots/types.ts#L24-L96 type SnapshotsRecord struct { // Duration The time it took the snapshot process to complete, in time units. Duration Duration `json:"duration,omitempty"` @@ -248,3 +248,5 @@ func NewSnapshotsRecord() *SnapshotsRecord { return r } + +// false diff --git a/typedapi/types/snapshotstats.go b/typedapi/types/snapshotstats.go index d07f8b54cf..494b0a5104 100644 --- a/typedapi/types/snapshotstats.go +++ b/typedapi/types/snapshotstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SnapshotStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotStats.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotStats.ts#L23-L29 type SnapshotStats struct { Incremental FileCountSnapshotStats `json:"incremental"` StartTimeInMillis int64 `json:"start_time_in_millis"` @@ -90,3 +90,5 @@ func NewSnapshotStats() *SnapshotStats { return r } + +// false diff --git a/typedapi/types/snowballanalyzer.go b/typedapi/types/snowballanalyzer.go index 00dd3dbf82..2ad4527c0b 100644 --- a/typedapi/types/snowballanalyzer.go +++ b/typedapi/types/snowballanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // SnowballAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/analyzers.ts#L88-L93 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L325-L330 type SnowballAnalyzer struct { Language snowballlanguage.SnowballLanguage `json:"language"` Stopwords []string `json:"stopwords,omitempty"` @@ -112,3 +112,13 @@ func NewSnowballAnalyzer() *SnowballAnalyzer { return r } + +// true + +type SnowballAnalyzerVariant interface { + SnowballAnalyzerCaster() *SnowballAnalyzer +} + +func (s *SnowballAnalyzer) SnowballAnalyzerCaster() *SnowballAnalyzer { + return s +} diff --git a/typedapi/types/snowballtokenfilter.go b/typedapi/types/snowballtokenfilter.go index 53ad25c635..97bfe00c6f 100644 --- a/typedapi/types/snowballtokenfilter.go +++ b/typedapi/types/snowballtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // SnowballTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L311-L314 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L310-L313 type SnowballTokenFilter struct { Language *snowballlanguage.SnowballLanguage `json:"language,omitempty"` Type string `json:"type,omitempty"` @@ -94,3 +94,13 @@ func NewSnowballTokenFilter() *SnowballTokenFilter { return r } + +// true + +type SnowballTokenFilterVariant interface { + SnowballTokenFilterCaster() *SnowballTokenFilter +} + +func (s *SnowballTokenFilter) SnowballTokenFilterCaster() *SnowballTokenFilter { + return s +} diff --git a/typedapi/types/softdeletes.go b/typedapi/types/softdeletes.go index 3de18f96c2..699e7c9f2f 100644 --- a/typedapi/types/softdeletes.go +++ b/typedapi/types/softdeletes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SoftDeletes type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L50-L63 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L51-L64 type SoftDeletes struct { // Enabled Indicates whether soft deletes are enabled on the index. 
Enabled *bool `json:"enabled,omitempty"` @@ -90,3 +90,13 @@ func NewSoftDeletes() *SoftDeletes { return r } + +// true + +type SoftDeletesVariant interface { + SoftDeletesCaster() *SoftDeletes +} + +func (s *SoftDeletes) SoftDeletesCaster() *SoftDeletes { + return s +} diff --git a/typedapi/types/soranianalyzer.go b/typedapi/types/soranianalyzer.go new file mode 100644 index 0000000000..1399de4a5a --- /dev/null +++ b/typedapi/types/soranianalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SoraniAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L269-L274 +type SoraniAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *SoraniAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SoraniAnalyzer) MarshalJSON() ([]byte, error) { + type innerSoraniAnalyzer SoraniAnalyzer + tmp := innerSoraniAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "sorani" + + return 
json.Marshal(tmp) +} + +// NewSoraniAnalyzer returns a SoraniAnalyzer. +func NewSoraniAnalyzer() *SoraniAnalyzer { + r := &SoraniAnalyzer{} + + return r +} + +// true + +type SoraniAnalyzerVariant interface { + SoraniAnalyzerCaster() *SoraniAnalyzer +} + +func (s *SoraniAnalyzer) SoraniAnalyzerCaster() *SoraniAnalyzer { + return s +} diff --git a/typedapi/types/sort.go b/typedapi/types/sort.go index a716eb4899..666bb7cfd8 100644 --- a/typedapi/types/sort.go +++ b/typedapi/types/sort.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Sort type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/sort.ts#L105-L105 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/sort.ts#L104-L104 type Sort []SortCombinations + +type SortVariant interface { + SortCaster() *Sort +} diff --git a/typedapi/types/sortcombinations.go b/typedapi/types/sortcombinations.go index 235007534c..f7aae067fe 100644 --- a/typedapi/types/sortcombinations.go +++ b/typedapi/types/sortcombinations.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // SortOptions // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/sort.ts#L99-L103 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/sort.ts#L98-L102 type SortCombinations any + +type SortCombinationsVariant interface { + SortCombinationsCaster() *SortCombinations +} diff --git a/typedapi/types/sortoptions.go b/typedapi/types/sortoptions.go index facea9b2a2..a0ee61d46a 100644 --- a/typedapi/types/sortoptions.go +++ b/typedapi/types/sortoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -27,7 +27,7 @@ import ( // SortOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/sort.ts#L87-L97 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/sort.ts#L86-L96 type SortOptions struct { Doc_ *ScoreSort `json:"_doc,omitempty"` GeoDistance_ *GeoDistanceSort `json:"_geo_distance,omitempty"` @@ -68,8 +68,18 @@ func (s SortOptions) MarshalJSON() ([]byte, error) { // NewSortOptions returns a SortOptions. 
func NewSortOptions() *SortOptions { r := &SortOptions{ - SortOptions: make(map[string]FieldSort, 0), + SortOptions: make(map[string]FieldSort), } return r } + +// true + +type SortOptionsVariant interface { + SortOptionsCaster() *SortOptions +} + +func (s *SortOptions) SortOptionsCaster() *SortOptions { + return s +} diff --git a/typedapi/types/sortprocessor.go b/typedapi/types/sortprocessor.go index a464187227..bb0fd02299 100644 --- a/typedapi/types/sortprocessor.go +++ b/typedapi/types/sortprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // SortProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L1079-L1095 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1478-L1494 type SortProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -153,3 +153,13 @@ func NewSortProcessor() *SortProcessor { return r } + +// true + +type SortProcessorVariant interface { + SortProcessorCaster() *SortProcessor +} + +func (s *SortProcessor) SortProcessorCaster() *SortProcessor { + return s +} diff --git a/typedapi/types/sourceconfig.go b/typedapi/types/sourceconfig.go index 37046db4f9..2bae72a121 100644 --- a/typedapi/types/sourceconfig.go +++ b/typedapi/types/sourceconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // bool // SourceFilter // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/SourceFilter.ts#L33-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/SourceFilter.ts#L33-L37 type SourceConfig any + +type SourceConfigVariant interface { + SourceConfigCaster() *SourceConfig +} diff --git a/typedapi/types/sourceconfigparam.go b/typedapi/types/sourceconfigparam.go index dde0bb3407..3d77418d96 100644 --- a/typedapi/types/sourceconfigparam.go +++ b/typedapi/types/sourceconfigparam.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // bool // []string // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/SourceFilter.ts#L39-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/SourceFilter.ts#L39-L45 type SourceConfigParam any diff --git a/typedapi/types/sourcefield.go b/typedapi/types/sourcefield.go index 528862083a..00a9253b26 100644 --- a/typedapi/types/sourcefield.go +++ b/typedapi/types/sourcefield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // SourceField type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/meta-fields.ts#L58-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/meta-fields.ts#L58-L65 type SourceField struct { Compress *bool `json:"compress,omitempty"` CompressThreshold *string `json:"compress_threshold,omitempty"` @@ -124,3 +124,13 @@ func NewSourceField() *SourceField { return r } + +// true + +type SourceFieldVariant interface { + SourceFieldCaster() *SourceField +} + +func (s *SourceField) SourceFieldCaster() *SourceField { + return s +} diff --git a/typedapi/types/sourcefilter.go b/typedapi/types/sourcefilter.go index 58a9c0f87f..34bfac5bf1 100644 --- a/typedapi/types/sourcefilter.go +++ b/typedapi/types/sourcefilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SourceFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/SourceFilter.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/SourceFilter.ts#L23-L31 type SourceFilter struct { Excludes []string `json:"excludes,omitempty"` Includes []string `json:"includes,omitempty"` @@ -104,3 +104,13 @@ func NewSourceFilter() *SourceFilter { return r } + +// true + +type SourceFilterVariant interface { + SourceFilterCaster() *SourceFilter +} + +func (s *SourceFilter) SourceFilterCaster() *SourceFilter { + return s +} diff --git a/typedapi/types/sourceindex.go b/typedapi/types/sourceindex.go new file mode 100644 index 0000000000..252f468f56 --- /dev/null +++ b/typedapi/types/sourceindex.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SourceIndex type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/migrate_reindex/MigrateReindexRequest.ts#L50-L52 +type SourceIndex struct { + Index string `json:"index"` +} + +func (s *SourceIndex) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + } + } + return nil +} + +// NewSourceIndex returns a SourceIndex. +func NewSourceIndex() *SourceIndex { + r := &SourceIndex{} + + return r +} + +// true + +type SourceIndexVariant interface { + SourceIndexCaster() *SourceIndex +} + +func (s *SourceIndex) SourceIndexCaster() *SourceIndex { + return s +} diff --git a/typedapi/types/sourceonlyrepository.go b/typedapi/types/sourceonlyrepository.go index 1a431c6b68..3e241b0f06 100644 --- a/typedapi/types/sourceonlyrepository.go +++ b/typedapi/types/sourceonlyrepository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SourceOnlyRepository type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotRepository.ts#L65-L68 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotRepository.ts#L65-L68 type SourceOnlyRepository struct { Settings SourceOnlyRepositorySettings `json:"settings"` Type string `json:"type,omitempty"` @@ -92,3 +92,13 @@ func NewSourceOnlyRepository() *SourceOnlyRepository { return r } + +// true + +type SourceOnlyRepositoryVariant interface { + SourceOnlyRepositoryCaster() *SourceOnlyRepository +} + +func (s *SourceOnlyRepository) SourceOnlyRepositoryCaster() *SourceOnlyRepository { + return s +} diff --git a/typedapi/types/sourceonlyrepositorysettings.go b/typedapi/types/sourceonlyrepositorysettings.go index 5e1b1d786e..9cac936e0e 100644 --- a/typedapi/types/sourceonlyrepositorysettings.go +++ b/typedapi/types/sourceonlyrepositorysettings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SourceOnlyRepositorySettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotRepository.ts#L117-L124 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotRepository.ts#L117-L124 type SourceOnlyRepositorySettings struct { ChunkSize ByteSize `json:"chunk_size,omitempty"` Compress *bool `json:"compress,omitempty"` @@ -139,3 +139,13 @@ func NewSourceOnlyRepositorySettings() *SourceOnlyRepositorySettings { return r } + +// true + +type SourceOnlyRepositorySettingsVariant interface { + SourceOnlyRepositorySettingsCaster() *SourceOnlyRepositorySettings +} + +func (s *SourceOnlyRepositorySettings) SourceOnlyRepositorySettingsCaster() *SourceOnlyRepositorySettings { + return s +} diff --git a/typedapi/types/spancontainingquery.go b/typedapi/types/spancontainingquery.go index 729935d78d..bf67e1e274 100644 --- a/typedapi/types/spancontainingquery.go +++ b/typedapi/types/spancontainingquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,11 +31,11 @@ import ( // SpanContainingQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/span.ts#L25-L36 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/span.ts#L25-L39 type SpanContainingQuery struct { // Big Can be any span query. // Matching spans from `big` that contain matches from `little` are returned. 
- Big *SpanQuery `json:"big,omitempty"` + Big SpanQuery `json:"big"` // Boost Floating point number used to decrease or increase the relevance scores of // the query. // Boost values are relative to the default value of 1.0. @@ -44,8 +44,8 @@ type SpanContainingQuery struct { Boost *float32 `json:"boost,omitempty"` // Little Can be any span query. // Matching spans from `big` that contain matches from `little` are returned. - Little *SpanQuery `json:"little,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + Little SpanQuery `json:"little"` + QueryName_ *string `json:"_name,omitempty"` } func (s *SpanContainingQuery) UnmarshalJSON(data []byte) error { @@ -112,3 +112,13 @@ func NewSpanContainingQuery() *SpanContainingQuery { return r } + +// true + +type SpanContainingQueryVariant interface { + SpanContainingQueryCaster() *SpanContainingQuery +} + +func (s *SpanContainingQuery) SpanContainingQueryCaster() *SpanContainingQuery { + return s +} diff --git a/typedapi/types/spanfieldmaskingquery.go b/typedapi/types/spanfieldmaskingquery.go index fde77c6d1f..4978ba16e9 100644 --- a/typedapi/types/spanfieldmaskingquery.go +++ b/typedapi/types/spanfieldmaskingquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,17 +31,17 @@ import ( // SpanFieldMaskingQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/span.ts#L38-L41 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/span.ts#L41-L47 type SpanFieldMaskingQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. // Boost values are relative to the default value of 1.0. // A boost value between 0 and 1.0 decreases the relevance score. // A value greater than 1.0 increases the relevance score. - Boost *float32 `json:"boost,omitempty"` - Field string `json:"field"` - Query *SpanQuery `json:"query,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + Boost *float32 `json:"boost,omitempty"` + Field string `json:"field"` + Query SpanQuery `json:"query"` + QueryName_ *string `json:"_name,omitempty"` } func (s *SpanFieldMaskingQuery) UnmarshalJSON(data []byte) error { @@ -108,3 +108,13 @@ func NewSpanFieldMaskingQuery() *SpanFieldMaskingQuery { return r } + +// true + +type SpanFieldMaskingQueryVariant interface { + SpanFieldMaskingQueryCaster() *SpanFieldMaskingQuery +} + +func (s *SpanFieldMaskingQuery) SpanFieldMaskingQueryCaster() *SpanFieldMaskingQuery { + return s +} diff --git a/typedapi/types/spanfirstquery.go b/typedapi/types/spanfirstquery.go index 72d2e739c9..e63d308323 100644 --- a/typedapi/types/spanfirstquery.go +++ b/typedapi/types/spanfirstquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SpanFirstQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/span.ts#L43-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/span.ts#L49-L61 type SpanFirstQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -42,8 +42,8 @@ type SpanFirstQuery struct { // End Controls the maximum end position permitted in a match. End int `json:"end"` // Match Can be any other span type query. - Match *SpanQuery `json:"match,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + Match SpanQuery `json:"match"` + QueryName_ *string `json:"_name,omitempty"` } func (s *SpanFirstQuery) UnmarshalJSON(data []byte) error { @@ -121,3 +121,13 @@ func NewSpanFirstQuery() *SpanFirstQuery { return r } + +// true + +type SpanFirstQueryVariant interface { + SpanFirstQueryCaster() *SpanFirstQuery +} + +func (s *SpanFirstQuery) SpanFirstQueryCaster() *SpanFirstQuery { + return s +} diff --git a/typedapi/types/spangapquery.go b/typedapi/types/spangapquery.go index f1081f0987..cfe1a79fd7 100644 --- a/typedapi/types/spangapquery.go +++ b/typedapi/types/spangapquery.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // SpanGapQuery type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/span.ts#L54-L56 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/span.ts#L63-L65 type SpanGapQuery map[string]int + +type SpanGapQueryVariant interface { + SpanGapQueryCaster() *SpanGapQuery +} diff --git a/typedapi/types/spanishanalyzer.go b/typedapi/types/spanishanalyzer.go new file mode 100644 index 0000000000..997249a534 --- /dev/null +++ b/typedapi/types/spanishanalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SpanishAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L276-L281 +type SpanishAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *SpanishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SpanishAnalyzer) MarshalJSON() ([]byte, error) { + type innerSpanishAnalyzer SpanishAnalyzer + tmp := innerSpanishAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "spanish" + + 
return json.Marshal(tmp) +} + +// NewSpanishAnalyzer returns a SpanishAnalyzer. +func NewSpanishAnalyzer() *SpanishAnalyzer { + r := &SpanishAnalyzer{} + + return r +} + +// true + +type SpanishAnalyzerVariant interface { + SpanishAnalyzerCaster() *SpanishAnalyzer +} + +func (s *SpanishAnalyzer) SpanishAnalyzerCaster() *SpanishAnalyzer { + return s +} diff --git a/typedapi/types/spanmultitermquery.go b/typedapi/types/spanmultitermquery.go index a14aac47bf..ae46d57277 100644 --- a/typedapi/types/spanmultitermquery.go +++ b/typedapi/types/spanmultitermquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SpanMultiTermQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/span.ts#L58-L63 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/span.ts#L67-L75 type SpanMultiTermQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -41,7 +41,7 @@ type SpanMultiTermQuery struct { Boost *float32 `json:"boost,omitempty"` // Match Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, // or `regexp` query). 
- Match *Query `json:"match,omitempty"` + Match Query `json:"match"` QueryName_ *string `json:"_name,omitempty"` } @@ -104,3 +104,13 @@ func NewSpanMultiTermQuery() *SpanMultiTermQuery { return r } + +// true + +type SpanMultiTermQueryVariant interface { + SpanMultiTermQueryCaster() *SpanMultiTermQuery +} + +func (s *SpanMultiTermQuery) SpanMultiTermQueryCaster() *SpanMultiTermQuery { + return s +} diff --git a/typedapi/types/spannearquery.go b/typedapi/types/spannearquery.go index 2ba83a4192..7922a7ca4d 100644 --- a/typedapi/types/spannearquery.go +++ b/typedapi/types/spannearquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SpanNearQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/span.ts#L65-L78 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/span.ts#L77-L93 type SpanNearQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -137,3 +137,13 @@ func NewSpanNearQuery() *SpanNearQuery { return r } + +// true + +type SpanNearQueryVariant interface { + SpanNearQueryCaster() *SpanNearQuery +} + +func (s *SpanNearQuery) SpanNearQueryCaster() *SpanNearQuery { + return s +} diff --git a/typedapi/types/spannotquery.go b/typedapi/types/spannotquery.go index 3c0c8dab93..13ca420dd3 100644 --- a/typedapi/types/spannotquery.go +++ b/typedapi/types/spannotquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SpanNotQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/span.ts#L80-L104 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/span.ts#L95-L122 type SpanNotQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -44,9 +44,9 @@ type SpanNotQuery struct { // Equivalent to setting both `pre` and `post`. Dist *int `json:"dist,omitempty"` // Exclude Span query whose matches must not overlap those returned. - Exclude *SpanQuery `json:"exclude,omitempty"` + Exclude SpanQuery `json:"exclude"` // Include Span query whose matches are filtered. - Include *SpanQuery `json:"include,omitempty"` + Include SpanQuery `json:"include"` // Post The number of tokens after the include span that can’t have overlap with the // exclude span. Post *int `json:"post,omitempty"` @@ -168,3 +168,13 @@ func NewSpanNotQuery() *SpanNotQuery { return r } + +// true + +type SpanNotQueryVariant interface { + SpanNotQueryCaster() *SpanNotQuery +} + +func (s *SpanNotQuery) SpanNotQueryCaster() *SpanNotQuery { + return s +} diff --git a/typedapi/types/spanorquery.go b/typedapi/types/spanorquery.go index 5b4530158c..996dc46699 100644 --- a/typedapi/types/spanorquery.go +++ b/typedapi/types/spanorquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SpanOrQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/span.ts#L106-L111 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/span.ts#L124-L132 type SpanOrQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -103,3 +103,13 @@ func NewSpanOrQuery() *SpanOrQuery { return r } + +// true + +type SpanOrQueryVariant interface { + SpanOrQueryCaster() *SpanOrQuery +} + +func (s *SpanOrQuery) SpanOrQueryCaster() *SpanOrQuery { + return s +} diff --git a/typedapi/types/spanquery.go b/typedapi/types/spanquery.go index e8624eabca..222eb346e9 100644 --- a/typedapi/types/spanquery.go +++ b/typedapi/types/spanquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,8 +30,9 @@ import ( // SpanQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/span.ts#L131-L173 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/span.ts#L158-L200 type SpanQuery struct { + AdditionalSpanQueryProperty map[string]json.RawMessage `json:"-"` // SpanContaining Accepts a list of span queries, but only returns those spans which also match // a second span query. SpanContaining *SpanContainingQuery `json:"span_containing,omitempty"` @@ -126,16 +127,69 @@ func (s *SpanQuery) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "SpanWithin", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalSpanQueryProperty == nil { + s.AdditionalSpanQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalSpanQueryProperty", err) + } + s.AdditionalSpanQueryProperty[key] = *raw + } + } } return nil } +// MarshalJSON overrides marshalling for types with additional properties +func (s SpanQuery) MarshalJSON() ([]byte, error) { + type opt SpanQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalSpanQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalSpanQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewSpanQuery returns a SpanQuery. 
func NewSpanQuery() *SpanQuery { r := &SpanQuery{ - SpanTerm: make(map[string]SpanTermQuery, 0), + AdditionalSpanQueryProperty: make(map[string]json.RawMessage), + SpanTerm: make(map[string]SpanTermQuery), } return r } + +// true + +type SpanQueryVariant interface { + SpanQueryCaster() *SpanQuery +} + +func (s *SpanQuery) SpanQueryCaster() *SpanQuery { + return s +} diff --git a/typedapi/types/spantermquery.go b/typedapi/types/spantermquery.go index 0ecb831bc7..ab304b23b8 100644 --- a/typedapi/types/spantermquery.go +++ b/typedapi/types/spantermquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SpanTermQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/span.ts#L113-L116 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/span.ts#L134-L140 type SpanTermQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -121,3 +121,13 @@ func NewSpanTermQuery() *SpanTermQuery { return r } + +// true + +type SpanTermQueryVariant interface { + SpanTermQueryCaster() *SpanTermQuery +} + +func (s *SpanTermQuery) SpanTermQueryCaster() *SpanTermQuery { + return s +} diff --git a/typedapi/types/spanwithinquery.go b/typedapi/types/spanwithinquery.go index 55c790cefa..49c0c3d434 100644 --- a/typedapi/types/spanwithinquery.go +++ b/typedapi/types/spanwithinquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,11 +31,11 @@ import ( // SpanWithinQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/span.ts#L118-L129 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/span.ts#L142-L156 type SpanWithinQuery struct { // Big Can be any span query. // Matching spans from `little` that are enclosed within `big` are returned. - Big *SpanQuery `json:"big,omitempty"` + Big SpanQuery `json:"big"` // Boost Floating point number used to decrease or increase the relevance scores of // the query. // Boost values are relative to the default value of 1.0. @@ -44,8 +44,8 @@ type SpanWithinQuery struct { Boost *float32 `json:"boost,omitempty"` // Little Can be any span query. // Matching spans from `little` that are enclosed within `big` are returned. - Little *SpanQuery `json:"little,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + Little SpanQuery `json:"little"` + QueryName_ *string `json:"_name,omitempty"` } func (s *SpanWithinQuery) UnmarshalJSON(data []byte) error { @@ -112,3 +112,13 @@ func NewSpanWithinQuery() *SpanWithinQuery { return r } + +// true + +type SpanWithinQueryVariant interface { + SpanWithinQueryCaster() *SpanWithinQuery +} + +func (s *SpanWithinQuery) SpanWithinQueryCaster() *SpanWithinQuery { + return s +} diff --git a/typedapi/types/sparseembeddingresult.go b/typedapi/types/sparseembeddingresult.go index abf3a50e51..e8209d3a48 100644 --- a/typedapi/types/sparseembeddingresult.go +++ b/typedapi/types/sparseembeddingresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SparseEmbeddingResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/_types/Results.ts#L36-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/_types/Results.ts#L36-L38 type SparseEmbeddingResult struct { Embedding SparseVector `json:"embedding"` } @@ -66,3 +66,5 @@ func NewSparseEmbeddingResult() *SparseEmbeddingResult { return r } + +// false diff --git a/typedapi/types/sparsevector.go b/typedapi/types/sparsevector.go index 9814992119..c3b7c62131 100644 --- a/typedapi/types/sparsevector.go +++ b/typedapi/types/sparsevector.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // SparseVector type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/_types/Results.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/_types/Results.ts#L24-L28 type SparseVector map[string]float32 diff --git a/typedapi/types/sparsevectorproperty.go b/typedapi/types/sparsevectorproperty.go index a5e30cfc96..2484914a94 100644 --- a/typedapi/types/sparsevectorproperty.go +++ b/typedapi/types/sparsevectorproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,19 +29,21 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // SparseVectorProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L202-L204 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L206-L208 type SparseVectorProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *SparseVectorProperty) UnmarshalJSON(data []byte) error { @@ -83,301 +85,313 @@ func (s *SparseVectorProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); 
err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); 
err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); 
err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -426,306 +440,323 @@ func (s *SparseVectorProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties 
| %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := 
NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := 
NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -740,12 +771,13 @@ func (s *SparseVectorProperty) UnmarshalJSON(data []byte) error { func (s SparseVectorProperty) MarshalJSON() ([]byte, error) { type innerSparseVectorProperty SparseVectorProperty tmp := innerSparseVectorProperty{ - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Type: s.Type, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "sparse_vector" @@ -756,10 +788,20 @@ func (s SparseVectorProperty) MarshalJSON() ([]byte, error) { // NewSparseVectorProperty returns a SparseVectorProperty. 
func NewSparseVectorProperty() *SparseVectorProperty { r := &SparseVectorProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type SparseVectorPropertyVariant interface { + SparseVectorPropertyCaster() *SparseVectorProperty +} + +func (s *SparseVectorProperty) SparseVectorPropertyCaster() *SparseVectorProperty { + return s +} diff --git a/typedapi/types/sparsevectorquery.go b/typedapi/types/sparsevectorquery.go index bd90efb297..c269d49839 100644 --- a/typedapi/types/sparsevectorquery.go +++ b/typedapi/types/sparsevectorquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,8 +31,9 @@ import ( // SparseVectorQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/SparseVectorQuery.ts#L26-L79 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/SparseVectorQuery.ts#L26-L80 type SparseVectorQuery struct { + AdditionalSparseVectorQueryProperty map[string]json.RawMessage `json:"-"` // Boost Floating point number used to decrease or increase the relevance scores of // the query. // Boost values are relative to the default value of 1.0. 
@@ -164,16 +165,69 @@ func (s *SparseVectorQuery) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "QueryVector", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalSparseVectorQueryProperty == nil { + s.AdditionalSparseVectorQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalSparseVectorQueryProperty", err) + } + s.AdditionalSparseVectorQueryProperty[key] = *raw + } + } } return nil } +// MarhsalJSON overrides marshalling for types with additional properties +func (s SparseVectorQuery) MarshalJSON() ([]byte, error) { + type opt SparseVectorQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalSparseVectorQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalSparseVectorQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewSparseVectorQuery returns a SparseVectorQuery. 
func NewSparseVectorQuery() *SparseVectorQuery { r := &SparseVectorQuery{ - QueryVector: make(map[string]float32, 0), + AdditionalSparseVectorQueryProperty: make(map[string]json.RawMessage), + QueryVector: make(map[string]float32), } return r } + +// true + +type SparseVectorQueryVariant interface { + SparseVectorQueryCaster() *SparseVectorQuery +} + +func (s *SparseVectorQuery) SparseVectorQueryCaster() *SparseVectorQuery { + return s +} diff --git a/typedapi/types/splitprocessor.go b/typedapi/types/splitprocessor.go index cd36293ea4..bc0fb506ca 100644 --- a/typedapi/types/splitprocessor.go +++ b/typedapi/types/splitprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SplitProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L1097-L1122 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1496-L1521 type SplitProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -190,3 +190,13 @@ func NewSplitProcessor() *SplitProcessor { return r } + +// true + +type SplitProcessorVariant interface { + SplitProcessorCaster() *SplitProcessor +} + +func (s *SplitProcessor) SplitProcessorCaster() *SplitProcessor { + return s +} diff --git a/typedapi/types/sql.go b/typedapi/types/sql.go index 51319c480e..9238be9c1d 100644 --- a/typedapi/types/sql.go +++ b/typedapi/types/sql.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Sql type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L386-L389 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L396-L399 type Sql struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -106,9 +106,11 @@ func (s *Sql) UnmarshalJSON(data []byte) error { // NewSql returns a Sql. func NewSql() *Sql { r := &Sql{ - Features: make(map[string]int, 0), - Queries: make(map[string]XpackQuery, 0), + Features: make(map[string]int), + Queries: make(map[string]XpackQuery), } return r } + +// false diff --git a/typedapi/types/ssl.go b/typedapi/types/ssl.go index 219d3afd8a..25d5288239 100644 --- a/typedapi/types/ssl.go +++ b/typedapi/types/ssl.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Ssl type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L391-L394 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L401-L404 type Ssl struct { Http FeatureToggle `json:"http"` Transport FeatureToggle `json:"transport"` @@ -34,3 +34,5 @@ func NewSsl() *Ssl { return r } + +// false diff --git a/typedapi/types/rankevalmetricratingtreshold.go b/typedapi/types/stagnatingbackingindices.go similarity index 53% rename from typedapi/types/rankevalmetricratingtreshold.go rename to typedapi/types/stagnatingbackingindices.go index e2bae7cfc7..06f63fd373 100644 --- a/typedapi/types/rankevalmetricratingtreshold.go +++ b/typedapi/types/stagnatingbackingindices.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,19 +29,16 @@ import ( "strconv" ) -// RankEvalMetricRatingTreshold type. +// StagnatingBackingIndices type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L34-L40 -type RankEvalMetricRatingTreshold struct { - // K Sets the maximum number of documents retrieved per query. This value will act - // in place of the usual size parameter in the query. - K *int `json:"k,omitempty"` - // RelevantRatingThreshold Sets the rating threshold above which documents are considered to be - // "relevant". 
- RelevantRatingThreshold *int `json:"relevant_rating_threshold,omitempty"` +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/health_report/types.ts#L157-L161 +type StagnatingBackingIndices struct { + FirstOccurrenceTimestamp int64 `json:"first_occurrence_timestamp"` + IndexName string `json:"index_name"` + RetryCount int `json:"retry_count"` } -func (s *RankEvalMetricRatingTreshold) UnmarshalJSON(data []byte) error { +func (s *StagnatingBackingIndices) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -56,23 +53,27 @@ func (s *RankEvalMetricRatingTreshold) UnmarshalJSON(data []byte) error { switch t { - case "k": - + case "first_occurrence_timestamp": var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: - value, err := strconv.Atoi(v) + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - return fmt.Errorf("%s | %w", "K", err) + return fmt.Errorf("%s | %w", "FirstOccurrenceTimestamp", err) } - s.K = &value + s.FirstOccurrenceTimestamp = value case float64: - f := int(v) - s.K = &f + f := int64(v) + s.FirstOccurrenceTimestamp = f } - case "relevant_rating_threshold": + case "index_name": + if err := dec.Decode(&s.IndexName); err != nil { + return fmt.Errorf("%s | %w", "IndexName", err) + } + + case "retry_count": var tmp any dec.Decode(&tmp) @@ -80,12 +81,12 @@ func (s *RankEvalMetricRatingTreshold) UnmarshalJSON(data []byte) error { case string: value, err := strconv.Atoi(v) if err != nil { - return fmt.Errorf("%s | %w", "RelevantRatingThreshold", err) + return fmt.Errorf("%s | %w", "RetryCount", err) } - s.RelevantRatingThreshold = &value + s.RetryCount = value case float64: f := int(v) - s.RelevantRatingThreshold = &f + s.RetryCount = f } } @@ -93,9 +94,11 @@ func (s *RankEvalMetricRatingTreshold) UnmarshalJSON(data []byte) error { return nil } -// NewRankEvalMetricRatingTreshold returns a RankEvalMetricRatingTreshold. 
-func NewRankEvalMetricRatingTreshold() *RankEvalMetricRatingTreshold { - r := &RankEvalMetricRatingTreshold{} +// NewStagnatingBackingIndices returns a StagnatingBackingIndices. +func NewStagnatingBackingIndices() *StagnatingBackingIndices { + r := &StagnatingBackingIndices{} return r } + +// false diff --git a/typedapi/types/standardanalyzer.go b/typedapi/types/standardanalyzer.go index f16e71ee64..8da06f1d79 100644 --- a/typedapi/types/standardanalyzer.go +++ b/typedapi/types/standardanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StandardAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/analyzers.ts#L95-L99 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L332-L336 type StandardAnalyzer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` Stopwords []string `json:"stopwords,omitempty"` @@ -115,3 +115,13 @@ func NewStandardAnalyzer() *StandardAnalyzer { return r } + +// true + +type StandardAnalyzerVariant interface { + StandardAnalyzerCaster() *StandardAnalyzer +} + +func (s *StandardAnalyzer) StandardAnalyzerCaster() *StandardAnalyzer { + return s +} diff --git a/typedapi/types/standarddeviationbounds.go b/typedapi/types/standarddeviationbounds.go index 872979fac2..6759e7a64e 100644 --- a/typedapi/types/standarddeviationbounds.go +++ b/typedapi/types/standarddeviationbounds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // StandardDeviationBounds type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L260-L267 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L281-L288 type StandardDeviationBounds struct { Lower *Float64 `json:"lower,omitempty"` LowerPopulation *Float64 `json:"lower_population,omitempty"` @@ -96,3 +96,5 @@ func NewStandardDeviationBounds() *StandardDeviationBounds { return r } + +// false diff --git a/typedapi/types/standarddeviationboundsasstring.go b/typedapi/types/standarddeviationboundsasstring.go index 6c7c8f0b45..6c9277a92a 100644 --- a/typedapi/types/standarddeviationboundsasstring.go +++ b/typedapi/types/standarddeviationboundsasstring.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StandardDeviationBoundsAsString type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L269-L276 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L290-L297 type StandardDeviationBoundsAsString struct { Lower string `json:"lower"` LowerPopulation string `json:"lower_population"` @@ -139,3 +139,5 @@ func NewStandardDeviationBoundsAsString() *StandardDeviationBoundsAsString { return r } + +// false diff --git a/typedapi/types/standardretriever.go b/typedapi/types/standardretriever.go index 7fddacd5f4..673b3bc03f 100644 --- a/typedapi/types/standardretriever.go +++ b/typedapi/types/standardretriever.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StandardRetriever type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Retriever.ts#L43-L56 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Retriever.ts#L51-L62 type StandardRetriever struct { // Collapse Collapses the top documents by a specified key into a single top document per // key. 
@@ -156,3 +156,13 @@ func NewStandardRetriever() *StandardRetriever { return r } + +// true + +type StandardRetrieverVariant interface { + StandardRetrieverCaster() *StandardRetriever +} + +func (s *StandardRetriever) StandardRetrieverCaster() *StandardRetriever { + return s +} diff --git a/typedapi/types/standardtokenizer.go b/typedapi/types/standardtokenizer.go index 14d4e4d11c..47e6a4b5f0 100644 --- a/typedapi/types/standardtokenizer.go +++ b/typedapi/types/standardtokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StandardTokenizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L105-L108 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L121-L124 type StandardTokenizer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` Type string `json:"type,omitempty"` @@ -104,3 +104,13 @@ func NewStandardTokenizer() *StandardTokenizer { return r } + +// true + +type StandardTokenizerVariant interface { + StandardTokenizerCaster() *StandardTokenizer +} + +func (s *StandardTokenizer) StandardTokenizerCaster() *StandardTokenizer { + return s +} diff --git a/typedapi/types/statistics.go b/typedapi/types/statistics.go index bcee6733ed..8653d6f7f1 100644 --- a/typedapi/types/statistics.go +++ b/typedapi/types/statistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Statistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/slm/_types/SnapshotLifecycle.ts#L51-L74 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/slm/_types/SnapshotLifecycle.ts#L61-L84 type Statistics struct { Policy *string `json:"policy,omitempty"` RetentionDeletionTime Duration `json:"retention_deletion_time,omitempty"` @@ -191,3 +191,5 @@ func NewStatistics() *Statistics { return r } + +// false diff --git a/typedapi/types/stats.go b/typedapi/types/stats.go index 723e8ae632..5f964a990f 100644 --- a/typedapi/types/stats.go +++ b/typedapi/types/stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // Stats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L30-L114 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L30-L114 type Stats struct { // AdaptiveSelection Statistics about adaptive replica selection. AdaptiveSelection map[string]AdaptiveSelection `json:"adaptive_selection,omitempty"` @@ -270,12 +270,14 @@ func (s *Stats) UnmarshalJSON(data []byte) error { // NewStats returns a Stats. 
func NewStats() *Stats { r := &Stats{ - AdaptiveSelection: make(map[string]AdaptiveSelection, 0), - Attributes: make(map[string]string, 0), - Breakers: make(map[string]Breaker, 0), - ScriptCache: make(map[string][]ScriptCache, 0), - ThreadPool: make(map[string]ThreadCount, 0), + AdaptiveSelection: make(map[string]AdaptiveSelection), + Attributes: make(map[string]string), + Breakers: make(map[string]Breaker), + ScriptCache: make(map[string][]ScriptCache), + ThreadPool: make(map[string]ThreadCount), } return r } + +// false diff --git a/typedapi/types/statsaggregate.go b/typedapi/types/statsaggregate.go index 0d568fc522..e727fad614 100644 --- a/typedapi/types/statsaggregate.go +++ b/typedapi/types/statsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StatsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L240-L255 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L257-L273 type StatsAggregate struct { Avg *Float64 `json:"avg,omitempty"` AvgAsString *string `json:"avg_as_string,omitempty"` @@ -170,3 +170,5 @@ func NewStatsAggregate() *StatsAggregate { return r } + +// false diff --git a/typedapi/types/statsaggregation.go b/typedapi/types/statsaggregation.go index 544f58e237..246514411b 100644 --- a/typedapi/types/statsaggregation.go +++ b/typedapi/types/statsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StatsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L282-L282 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L291-L291 type StatsAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -95,3 +95,13 @@ func NewStatsAggregation() *StatsAggregation { return r } + +// true + +type StatsAggregationVariant interface { + StatsAggregationCaster() *StatsAggregation +} + +func (s *StatsAggregation) StatsAggregationCaster() *StatsAggregation { + return s +} diff --git a/typedapi/types/statsbucketaggregate.go b/typedapi/types/statsbucketaggregate.go index b1c480a280..f4aa07746b 100644 --- a/typedapi/types/statsbucketaggregate.go +++ b/typedapi/types/statsbucketaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StatsBucketAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L257-L258 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L275-L279 type StatsBucketAggregate struct { Avg *Float64 `json:"avg,omitempty"` AvgAsString *string `json:"avg_as_string,omitempty"` @@ -170,3 +170,5 @@ func NewStatsBucketAggregate() *StatsBucketAggregate { return r } + +// false diff --git a/typedapi/types/statsbucketaggregation.go b/typedapi/types/statsbucketaggregation.go index cdcf990953..be6e41e5ac 100644 --- a/typedapi/types/statsbucketaggregation.go +++ b/typedapi/types/statsbucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // StatsBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L369-L369 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L410-L410 type StatsBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewStatsBucketAggregation() *StatsBucketAggregation { return r } + +// true + +type StatsBucketAggregationVariant interface { + StatsBucketAggregationCaster() *StatsBucketAggregation +} + +func (s *StatsBucketAggregation) StatsBucketAggregationCaster() *StatsBucketAggregation { + return s +} diff --git a/typedapi/types/status.go b/typedapi/types/status.go index bf594000b5..813f72b78b 100644 --- a/typedapi/types/status.go +++ b/typedapi/types/status.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Status type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/snapshot/_types/SnapshotStatus.ts#L26-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/_types/SnapshotStatus.ts#L26-L35 type Status struct { IncludeGlobalState bool `json:"include_global_state"` Indices map[string]SnapshotIndexStats `json:"indices"` @@ -139,8 +139,10 @@ func (s *Status) UnmarshalJSON(data []byte) error { // NewStatus returns a Status. func NewStatus() *Status { r := &Status{ - Indices: make(map[string]SnapshotIndexStats, 0), + Indices: make(map[string]SnapshotIndexStats), } return r } + +// false diff --git a/typedapi/types/forcemergeresponsebody.go b/typedapi/types/statuserror.go similarity index 57% rename from typedapi/types/forcemergeresponsebody.go rename to typedapi/types/statuserror.go index 32eba1d758..117ec9be30 100644 --- a/typedapi/types/forcemergeresponsebody.go +++ b/typedapi/types/statuserror.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,17 +29,15 @@ import ( "strconv" ) -// ForceMergeResponseBody type. +// StatusError type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/forcemerge/_types/response.ts#L22-L28 -type ForceMergeResponseBody struct { - Shards_ ShardStatistics `json:"_shards"` - // Task task contains a task id returned when wait_for_completion=false, - // you can use the task_id to get the status of the task at _tasks/ - Task *string `json:"task,omitempty"` +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_migrate_reindex_status/MigrateGetReindexStatusResponse.ts#L44-L47 +type StatusError struct { + Index string `json:"index"` + Message string `json:"message"` } -func (s *ForceMergeResponseBody) UnmarshalJSON(data []byte) error { +func (s *StatusError) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -54,31 +52,40 @@ func (s *ForceMergeResponseBody) UnmarshalJSON(data []byte) error { switch t { - case "_shards": - if err := dec.Decode(&s.Shards_); err != nil { - return fmt.Errorf("%s | %w", "Shards_", err) + case "index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) } + s.Index = o - case "task": + case "message": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Task", err) + return fmt.Errorf("%s | %w", "Message", err) } o := string(tmp[:]) o, err = strconv.Unquote(o) if err != nil { o = 
string(tmp[:]) } - s.Task = &o + s.Message = o } } return nil } -// NewForceMergeResponseBody returns a ForceMergeResponseBody. -func NewForceMergeResponseBody() *ForceMergeResponseBody { - r := &ForceMergeResponseBody{} +// NewStatusError returns a StatusError. +func NewStatusError() *StatusError { + r := &StatusError{} return r } + +// false diff --git a/typedapi/types/statusinprogress.go b/typedapi/types/statusinprogress.go new file mode 100644 index 0000000000..af0a4bb3e6 --- /dev/null +++ b/typedapi/types/statusinprogress.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StatusInProgress type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_migrate_reindex_status/MigrateGetReindexStatusResponse.ts#L38-L42 +type StatusInProgress struct { + Index string `json:"index"` + ReindexedDocCount int64 `json:"reindexed_doc_count"` + TotalDocCount int64 `json:"total_doc_count"` +} + +func (s *StatusInProgress) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Index = o + + case "reindexed_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReindexedDocCount", err) + } + s.ReindexedDocCount = value + case float64: + f := int64(v) + s.ReindexedDocCount = f + } + + case "total_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalDocCount", err) + } + s.TotalDocCount = value + case float64: + f := int64(v) + s.TotalDocCount = f + } + + } + } + return nil +} + +// NewStatusInProgress returns a StatusInProgress. +func NewStatusInProgress() *StatusInProgress { + r := &StatusInProgress{} + + return r +} + +// false diff --git a/typedapi/types/stemmeroverridetokenfilter.go b/typedapi/types/stemmeroverridetokenfilter.go index fa96ed8a0a..f4c20dedbd 100644 --- a/typedapi/types/stemmeroverridetokenfilter.go +++ b/typedapi/types/stemmeroverridetokenfilter.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StemmerOverrideTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L316-L320 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L315-L319 type StemmerOverrideTokenFilter struct { Rules []string `json:"rules,omitempty"` RulesPath *string `json:"rules_path,omitempty"` @@ -107,3 +107,13 @@ func NewStemmerOverrideTokenFilter() *StemmerOverrideTokenFilter { return r } + +// true + +type StemmerOverrideTokenFilterVariant interface { + StemmerOverrideTokenFilterCaster() *StemmerOverrideTokenFilter +} + +func (s *StemmerOverrideTokenFilter) StemmerOverrideTokenFilterCaster() *StemmerOverrideTokenFilter { + return s +} diff --git a/typedapi/types/stemmertokenfilter.go b/typedapi/types/stemmertokenfilter.go index ac008e6a8c..e76644938a 100644 --- a/typedapi/types/stemmertokenfilter.go +++ b/typedapi/types/stemmertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StemmerTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L322-L326 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L321-L325 type StemmerTokenFilter struct { Language *string `json:"language,omitempty"` Type string `json:"type,omitempty"` @@ -100,3 +100,13 @@ func NewStemmerTokenFilter() *StemmerTokenFilter { return r } + +// true + +type StemmerTokenFilterVariant interface { + StemmerTokenFilterCaster() *StemmerTokenFilter +} + +func (s *StemmerTokenFilter) StemmerTokenFilterCaster() *StemmerTokenFilter { + return s +} diff --git a/typedapi/types/stepkey.go b/typedapi/types/stepkey.go index 8c73eac693..f9145bff44 100644 --- a/typedapi/types/stepkey.go +++ b/typedapi/types/stepkey.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,11 +31,13 @@ import ( // StepKey type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/move_to_step/types.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/move_to_step/types.ts#L20-L31 type StepKey struct { - Action string `json:"action"` - Name string `json:"name"` - Phase string `json:"phase"` + // Action The optional action to which the index will be moved. + Action *string `json:"action,omitempty"` + // Name The optional step name to which the index will be moved. 
+ Name *string `json:"name,omitempty"` + Phase string `json:"phase"` } func (s *StepKey) UnmarshalJSON(data []byte) error { @@ -63,7 +65,7 @@ func (s *StepKey) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Action = o + s.Action = &o case "name": var tmp json.RawMessage @@ -75,7 +77,7 @@ func (s *StepKey) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Name = o + s.Name = &o case "phase": var tmp json.RawMessage @@ -100,3 +102,13 @@ func NewStepKey() *StepKey { return r } + +// true + +type StepKeyVariant interface { + StepKeyCaster() *StepKey +} + +func (s *StepKey) StepKeyCaster() *StepKey { + return s +} diff --git a/typedapi/types/stopanalyzer.go b/typedapi/types/stopanalyzer.go index 76ef6c70cc..341f9301da 100644 --- a/typedapi/types/stopanalyzer.go +++ b/typedapi/types/stopanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StopAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/analyzers.ts#L101-L106 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L338-L343 type StopAnalyzer struct { Stopwords []string `json:"stopwords,omitempty"` StopwordsPath *string `json:"stopwords_path,omitempty"` @@ -118,3 +118,13 @@ func NewStopAnalyzer() *StopAnalyzer { return r } + +// true + +type StopAnalyzerVariant interface { + StopAnalyzerCaster() *StopAnalyzer +} + +func (s *StopAnalyzer) StopAnalyzerCaster() *StopAnalyzer { + return s +} diff --git a/typedapi/types/stoptokenfilter.go b/typedapi/types/stoptokenfilter.go index 5888e14daa..e7f1ad7c8e 100644 --- a/typedapi/types/stoptokenfilter.go +++ b/typedapi/types/stoptokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StopTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L97-L103 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L96-L102 type StopTokenFilter struct { IgnoreCase *bool `json:"ignore_case,omitempty"` RemoveTrailing *bool `json:"remove_trailing,omitempty"` @@ -150,3 +150,13 @@ func NewStopTokenFilter() *StopTokenFilter { return r } + +// true + +type StopTokenFilterVariant interface { + StopTokenFilterCaster() *StopTokenFilter +} + +func (s *StopTokenFilter) StopTokenFilterCaster() *StopTokenFilter { + return s +} diff --git a/typedapi/types/stopwords.go b/typedapi/types/stopwords.go index 8bbf3664af..3d678f623a 100644 --- a/typedapi/types/stopwords.go +++ b/typedapi/types/stopwords.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // StopWords type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/StopWords.ts#L20-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/StopWords.ts#L20-L26 type StopWords []string + +type StopWordsVariant interface { + StopWordsCaster() *StopWords +} diff --git a/typedapi/types/storage.go b/typedapi/types/storage.go index 538eaf8691..43bdef9307 100644 --- a/typedapi/types/storage.go +++ b/typedapi/types/storage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // Storage type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L509-L518 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L534-L543 type Storage struct { // AllowMmap You can restrict the use of the mmapfs and the related hybridfs store type // via the setting node.store.allow_mmap. @@ -91,3 +91,13 @@ func NewStorage() *Storage { return r } + +// true + +type StorageVariant interface { + StorageCaster() *Storage +} + +func (s *Storage) StorageCaster() *Storage { + return s +} diff --git a/typedapi/types/storedscript.go b/typedapi/types/storedscript.go index 186dd75b6d..d7f1843685 100644 --- a/typedapi/types/storedscript.go +++ b/typedapi/types/storedscript.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,12 +33,14 @@ import ( // StoredScript type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Scripting.ts#L47-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Scripting.ts#L47-L59 type StoredScript struct { - // Lang Specifies the language the script is written in. + // Lang The language the script is written in. + // For search templates, use `mustache`.
Lang scriptlanguage.ScriptLanguage `json:"lang"` Options map[string]string `json:"options,omitempty"` // Source The script source. + // For search templates, an object containing the search template. Source string `json:"source"` } @@ -90,8 +92,18 @@ func (s *StoredScript) UnmarshalJSON(data []byte) error { // NewStoredScript returns a StoredScript. func NewStoredScript() *StoredScript { r := &StoredScript{ - Options: make(map[string]string, 0), + Options: make(map[string]string), } return r } + +// true + +type StoredScriptVariant interface { + StoredScriptCaster() *StoredScript +} + +func (s *StoredScript) StoredScriptCaster() *StoredScript { + return s +} diff --git a/typedapi/types/storestats.go b/typedapi/types/storestats.go index f066d69991..2a2b347d88 100644 --- a/typedapi/types/storestats.go +++ b/typedapi/types/storestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StoreStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L368-L395 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L371-L398 type StoreStats struct { // Reserved A prediction of how much larger the shard stores will eventually grow due to // ongoing peer recoveries, restoring snapshots, and similar activities. 
@@ -140,3 +140,5 @@ func NewStoreStats() *StoreStats { return r } + +// false diff --git a/typedapi/types/streamresult.go b/typedapi/types/streamresult.go new file mode 100644 index 0000000000..036bf219e8 --- /dev/null +++ b/typedapi/types/streamresult.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +// StreamResult type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Binary.ts#L27-L27 +type StreamResult []byte diff --git a/typedapi/types/stringifiedboolean.go b/typedapi/types/stringifiedboolean.go index b027802677..ab556b8cee 100644 --- a/typedapi/types/stringifiedboolean.go +++ b/typedapi/types/stringifiedboolean.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // bool // string // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_spec_utils/Stringified.ts#L20-L27 type Stringifiedboolean any + +type StringifiedbooleanVariant interface { + StringifiedbooleanCaster() *Stringifiedboolean +} diff --git a/typedapi/types/stringifieddouble.go b/typedapi/types/stringifieddouble.go new file mode 100644 index 0000000000..44f3a938c2 --- /dev/null +++ b/typedapi/types/stringifieddouble.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +// Stringifieddouble holds the union for the following types: +// +// Float64 +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_spec_utils/Stringified.ts#L20-L27 +type Stringifieddouble any diff --git a/typedapi/types/stringifiedepochtimeunitmillis.go b/typedapi/types/stringifiedepochtimeunitmillis.go index 046843df60..edef5d040d 100644 --- a/typedapi/types/stringifiedepochtimeunitmillis.go +++ b/typedapi/types/stringifiedepochtimeunitmillis.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // int64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_spec_utils/Stringified.ts#L20-L27 type StringifiedEpochTimeUnitMillis any + +type StringifiedEpochTimeUnitMillisVariant interface { + StringifiedEpochTimeUnitMillisCaster() *StringifiedEpochTimeUnitMillis +} diff --git a/typedapi/types/stringifiedepochtimeunitseconds.go b/typedapi/types/stringifiedepochtimeunitseconds.go index 34351f8252..b3a4858dc1 100644 --- a/typedapi/types/stringifiedepochtimeunitseconds.go +++ b/typedapi/types/stringifiedepochtimeunitseconds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // int64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_spec_utils/Stringified.ts#L20-L27 type StringifiedEpochTimeUnitSeconds any diff --git a/typedapi/types/stringifiedinteger.go b/typedapi/types/stringifiedinteger.go index 68809e830d..8d08da447c 100644 --- a/typedapi/types/stringifiedinteger.go +++ b/typedapi/types/stringifiedinteger.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // int // string // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_spec_utils/Stringified.ts#L20-L27 type Stringifiedinteger any + +type StringifiedintegerVariant interface { + StringifiedintegerCaster() *Stringifiedinteger +} diff --git a/typedapi/types/stringifiedversionnumber.go b/typedapi/types/stringifiedversionnumber.go index a3560182d0..46b2ac2a23 100644 --- a/typedapi/types/stringifiedversionnumber.go +++ b/typedapi/types/stringifiedversionnumber.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // int64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_spec_utils/Stringified.ts#L20-L27 type StringifiedVersionNumber any diff --git a/typedapi/types/stringraretermsaggregate.go b/typedapi/types/stringraretermsaggregate.go index 16922aa846..dc18eae57c 100644 --- a/typedapi/types/stringraretermsaggregate.go +++ b/typedapi/types/stringraretermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // StringRareTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L445-L449 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L483-L487 type StringRareTermsAggregate struct { Buckets BucketsStringRareTermsBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewStringRareTermsAggregate() *StringRareTermsAggregate { return r } + +// false diff --git a/typedapi/types/stringraretermsbucket.go b/typedapi/types/stringraretermsbucket.go index 0c66bd1b84..689feb5843 100644 --- a/typedapi/types/stringraretermsbucket.go +++ b/typedapi/types/stringraretermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // StringRareTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L451-L453 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L489-L491 type StringRareTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -498,6 +498,13 @@ func (s *StringRareTermsBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -631,8 +638,10 @@ func (s StringRareTermsBucket) MarshalJSON() ([]byte, error) { // NewStringRareTermsBucket returns a StringRareTermsBucket. func NewStringRareTermsBucket() *StringRareTermsBucket { r := &StringRareTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/stringstatsaggregate.go b/typedapi/types/stringstatsaggregate.go index 07f4d71f8f..9c32b04e83 100644 --- a/typedapi/types/stringstatsaggregate.go +++ b/typedapi/types/stringstatsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StringStatsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L700-L711 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L793-L804 type StringStatsAggregate struct { AvgLength *Float64 `json:"avg_length,omitempty"` AvgLengthAsString *string `json:"avg_length_as_string,omitempty"` @@ -152,3 +152,5 @@ func NewStringStatsAggregate() *StringStatsAggregate { return r } + +// false diff --git a/typedapi/types/stringstatsaggregation.go b/typedapi/types/stringstatsaggregation.go index 4b749eaaf3..1da5230a47 100644 --- a/typedapi/types/stringstatsaggregation.go +++ b/typedapi/types/stringstatsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StringStatsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L284-L290 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L293-L299 type StringStatsAggregation struct { // Field The field on which to run the aggregation. 
Field *string `json:"field,omitempty"` @@ -98,3 +98,13 @@ func NewStringStatsAggregation() *StringStatsAggregation { return r } + +// true + +type StringStatsAggregationVariant interface { + StringStatsAggregationCaster() *StringStatsAggregation +} + +func (s *StringStatsAggregation) StringStatsAggregationCaster() *StringStatsAggregation { + return s +} diff --git a/typedapi/types/stringtermsaggregate.go b/typedapi/types/stringtermsaggregate.go index 3d2069e0e7..09a4b6a246 100644 --- a/typedapi/types/stringtermsaggregate.go +++ b/typedapi/types/stringtermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StringTermsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L386-L391 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L424-L429 type StringTermsAggregate struct { Buckets BucketsStringTermsBucket `json:"buckets"` DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` @@ -121,3 +121,5 @@ func NewStringTermsAggregate() *StringTermsAggregate { return r } + +// false diff --git a/typedapi/types/stringtermsbucket.go b/typedapi/types/stringtermsbucket.go index dc5c51ba92..d1886f3eb9 100644 --- a/typedapi/types/stringtermsbucket.go +++ b/typedapi/types/stringtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // StringTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L397-L399 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L435-L437 type StringTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -507,6 +507,13 @@ func (s *StringTermsBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -640,8 +647,10 @@ func (s StringTermsBucket) MarshalJSON() ([]byte, error) { // NewStringTermsBucket returns a StringTermsBucket. func NewStringTermsBucket() *StringTermsBucket { r := &StringTermsBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/stupidbackoffsmoothingmodel.go b/typedapi/types/stupidbackoffsmoothingmodel.go index b6344c5638..23f83f9209 100644 --- a/typedapi/types/stupidbackoffsmoothingmodel.go +++ b/typedapi/types/stupidbackoffsmoothingmodel.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // StupidBackoffSmoothingModel type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L463-L468 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L463-L468 type StupidBackoffSmoothingModel struct { // Discount A constant factor that the lower order n-gram model is discounted by. Discount Float64 `json:"discount"` @@ -79,3 +79,13 @@ func NewStupidBackoffSmoothingModel() *StupidBackoffSmoothingModel { return r } + +// true + +type StupidBackoffSmoothingModelVariant interface { + StupidBackoffSmoothingModelCaster() *StupidBackoffSmoothingModel +} + +func (s *StupidBackoffSmoothingModel) StupidBackoffSmoothingModelCaster() *StupidBackoffSmoothingModel { + return s +} diff --git a/typedapi/types/suggest.go b/typedapi/types/suggest.go index dcb00c06b3..90718b2cc5 100644 --- a/typedapi/types/suggest.go +++ b/typedapi/types/suggest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,5 @@ package types // PhraseSuggest // TermSuggest // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L34-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L34-L40 type Suggest any diff --git a/typedapi/types/suggestcontext.go b/typedapi/types/suggestcontext.go index 89d6ab9c4e..6591e381b4 100644 --- a/typedapi/types/suggestcontext.go +++ b/typedapi/types/suggestcontext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SuggestContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/specialized.ts#L43-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/specialized.ts#L43-L48 type SuggestContext struct { Name string `json:"name"` Path *string `json:"path,omitempty"` @@ -99,3 +99,13 @@ func NewSuggestContext() *SuggestContext { return r } + +// true + +type SuggestContextVariant interface { + SuggestContextCaster() *SuggestContext +} + +func (s *SuggestContext) SuggestContextCaster() *SuggestContext { + return s +} diff --git a/typedapi/types/suggester.go b/typedapi/types/suggester.go index 98a67af8a4..e7dad4e0ad 100644 --- a/typedapi/types/suggester.go +++ b/typedapi/types/suggester.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Suggester type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L101-L107 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L101-L107 type Suggester struct { Suggesters map[string]FieldSuggester `json:"-"` // Text Global suggest text, to avoid repetition when the same text is used in @@ -116,8 +116,18 @@ func (s Suggester) MarshalJSON() ([]byte, error) { // NewSuggester returns a Suggester. 
func NewSuggester() *Suggester { r := &Suggester{ - Suggesters: make(map[string]FieldSuggester, 0), + Suggesters: make(map[string]FieldSuggester), } return r } + +// true + +type SuggesterVariant interface { + SuggesterCaster() *Suggester +} + +func (s *Suggester) SuggesterCaster() *Suggester { + return s +} diff --git a/typedapi/types/suggestfuzziness.go b/typedapi/types/suggestfuzziness.go index 41272b9e94..439b70e6bc 100644 --- a/typedapi/types/suggestfuzziness.go +++ b/typedapi/types/suggestfuzziness.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SuggestFuzziness type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L196-L224 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L196-L224 type SuggestFuzziness struct { // Fuzziness The fuzziness factor. Fuzziness Fuzziness `json:"fuzziness,omitempty"` @@ -138,3 +138,13 @@ func NewSuggestFuzziness() *SuggestFuzziness { return r } + +// true + +type SuggestFuzzinessVariant interface { + SuggestFuzzinessCaster() *SuggestFuzziness +} + +func (s *SuggestFuzziness) SuggestFuzzinessCaster() *SuggestFuzziness { + return s +} diff --git a/typedapi/types/sumaggregate.go b/typedapi/types/sumaggregate.go index 5480219199..e2e9fbfb8f 100644 --- a/typedapi/types/sumaggregate.go +++ b/typedapi/types/sumaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SumAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L203-L207 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L211-L216 type SumAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewSumAggregate() *SumAggregate { return r } + +// false diff --git a/typedapi/types/sumaggregation.go b/typedapi/types/sumaggregation.go index 15b5da7d3e..8986be8fdf 100644 --- a/typedapi/types/sumaggregation.go +++ b/typedapi/types/sumaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SumAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L292-L292 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L301-L301 type SumAggregation struct { // Field The field on which to run the aggregation. 
Field *string `json:"field,omitempty"` @@ -95,3 +95,13 @@ func NewSumAggregation() *SumAggregation { return r } + +// true + +type SumAggregationVariant interface { + SumAggregationCaster() *SumAggregation +} + +func (s *SumAggregation) SumAggregationCaster() *SumAggregation { + return s +} diff --git a/typedapi/types/sumbucketaggregation.go b/typedapi/types/sumbucketaggregation.go index df1699fa0a..9c08362235 100644 --- a/typedapi/types/sumbucketaggregation.go +++ b/typedapi/types/sumbucketaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // SumBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/pipeline.ts#L371-L371 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/pipeline.ts#L412-L415 type SumBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` @@ -93,3 +93,13 @@ func NewSumBucketAggregation() *SumBucketAggregation { return r } + +// true + +type SumBucketAggregationVariant interface { + SumBucketAggregationCaster() *SumBucketAggregation +} + +func (s *SumBucketAggregation) SumBucketAggregationCaster() *SumBucketAggregation { + return s +} diff --git a/typedapi/types/summary.go b/typedapi/types/summary.go index e0aebd6245..c608fb39ca 100644 --- a/typedapi/types/summary.go +++ b/typedapi/types/summary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // Summary type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/enrich/_types/Policy.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/enrich/_types/Policy.ts#L24-L26 type Summary struct { Config map[policytype.PolicyType]EnrichPolicy `json:"config"` } @@ -34,8 +34,10 @@ type Summary struct { // NewSummary returns a Summary. func NewSummary() *Summary { r := &Summary{ - Config: make(map[policytype.PolicyType]EnrichPolicy, 0), + Config: make(map[policytype.PolicyType]EnrichPolicy), } return r } + +// false diff --git a/typedapi/types/summaryinfo.go b/typedapi/types/summaryinfo.go new file mode 100644 index 0000000000..dd13dc7ded --- /dev/null +++ b/typedapi/types/summaryinfo.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +// SummaryInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L193-L202 +type SummaryInfo struct { + // Read A collection of statistics that summarise the results of the read operations + // in the test. + Read ReadSummaryInfo `json:"read"` + // Write A collection of statistics that summarise the results of the write operations + // in the test. + Write WriteSummaryInfo `json:"write"` +} + +// NewSummaryInfo returns a SummaryInfo. +func NewSummaryInfo() *SummaryInfo { + r := &SummaryInfo{} + + return r +} + +// false diff --git a/typedapi/types/swedishanalyzer.go b/typedapi/types/swedishanalyzer.go new file mode 100644 index 0000000000..3ce7621184 --- /dev/null +++ b/typedapi/types/swedishanalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SwedishAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L283-L288 +type SwedishAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *SwedishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SwedishAnalyzer) MarshalJSON() ([]byte, 
error) { + type innerSwedishAnalyzer SwedishAnalyzer + tmp := innerSwedishAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "swedish" + + return json.Marshal(tmp) +} + +// NewSwedishAnalyzer returns a SwedishAnalyzer. +func NewSwedishAnalyzer() *SwedishAnalyzer { + r := &SwedishAnalyzer{} + + return r +} + +// true + +type SwedishAnalyzerVariant interface { + SwedishAnalyzerCaster() *SwedishAnalyzer +} + +func (s *SwedishAnalyzer) SwedishAnalyzerCaster() *SwedishAnalyzer { + return s +} diff --git a/typedapi/types/synccontainer.go b/typedapi/types/synccontainer.go index 544b2cbf3e..47d06340af 100644 --- a/typedapi/types/synccontainer.go +++ b/typedapi/types/synccontainer.go @@ -16,22 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // SyncContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/_types/Transform.ts#L169-L175 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/_types/Transform.ts#L169-L175 type SyncContainer struct { + AdditionalSyncContainerProperty map[string]json.RawMessage `json:"-"` // Time Specifies that the transform uses a time field to synchronize the source and // destination indices. 
Time *TimeSync `json:"time,omitempty"` } +// MarhsalJSON overrides marshalling for types with additional properties +func (s SyncContainer) MarshalJSON() ([]byte, error) { + type opt SyncContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalSyncContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalSyncContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewSyncContainer returns a SyncContainer. func NewSyncContainer() *SyncContainer { - r := &SyncContainer{} + r := &SyncContainer{ + AdditionalSyncContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type SyncContainerVariant interface { + SyncContainerCaster() *SyncContainer +} + +func (s *SyncContainer) SyncContainerCaster() *SyncContainer { + return s +} diff --git a/typedapi/types/syncjobconnectorreference.go b/typedapi/types/syncjobconnectorreference.go index 6c40706c36..5ad6d0978d 100644 --- a/typedapi/types/syncjobconnectorreference.go +++ b/typedapi/types/syncjobconnectorreference.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SyncJobConnectorReference type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/SyncJob.ts#L31-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/SyncJob.ts#L31-L40 type SyncJobConnectorReference struct { Configuration ConnectorConfiguration `json:"configuration"` Filtering FilteringRules `json:"filtering"` @@ -130,3 +130,5 @@ func NewSyncJobConnectorReference() *SyncJobConnectorReference { return r } + +// false diff --git a/typedapi/types/syncrulesfeature.go b/typedapi/types/syncrulesfeature.go index cbf1cdb5ce..57a7df4ca5 100644 --- a/typedapi/types/syncrulesfeature.go +++ b/typedapi/types/syncrulesfeature.go @@ -16,16 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // SyncRulesFeature type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L219-L222 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L219-L228 type SyncRulesFeature struct { + // Advanced Indicates whether advanced sync rules are enabled. Advanced *FeatureEnabled `json:"advanced,omitempty"` - Basic *FeatureEnabled `json:"basic,omitempty"` + // Basic Indicates whether basic sync rules are enabled. + Basic *FeatureEnabled `json:"basic,omitempty"` } // NewSyncRulesFeature returns a SyncRulesFeature. 
@@ -34,3 +36,13 @@ func NewSyncRulesFeature() *SyncRulesFeature { return r } + +// true + +type SyncRulesFeatureVariant interface { + SyncRulesFeatureCaster() *SyncRulesFeature +} + +func (s *SyncRulesFeature) SyncRulesFeatureCaster() *SyncRulesFeature { + return s +} diff --git a/typedapi/types/synonymgraphtokenfilter.go b/typedapi/types/synonymgraphtokenfilter.go index 97422af1a1..3150982a2e 100644 --- a/typedapi/types/synonymgraphtokenfilter.go +++ b/typedapi/types/synonymgraphtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // SynonymGraphTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L110-L120 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L109-L119 type SynonymGraphTokenFilter struct { Expand *bool `json:"expand,omitempty"` Format *synonymformat.SynonymFormat `json:"format,omitempty"` @@ -192,3 +192,13 @@ func NewSynonymGraphTokenFilter() *SynonymGraphTokenFilter { return r } + +// true + +type SynonymGraphTokenFilterVariant interface { + SynonymGraphTokenFilterCaster() *SynonymGraphTokenFilter +} + +func (s *SynonymGraphTokenFilter) SynonymGraphTokenFilterCaster() *SynonymGraphTokenFilter { + return s +} diff --git a/typedapi/types/synonymrule.go b/typedapi/types/synonymrule.go index 95a014a038..b687dfba22 100644 --- a/typedapi/types/synonymrule.go +++ b/typedapi/types/synonymrule.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,12 +30,13 @@ import ( // SynonymRule type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/synonyms/_types/SynonymRule.ts#L26-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/synonyms/_types/SynonymRule.ts#L26-L37 type SynonymRule struct { - // Id Synonym Rule identifier + // Id The identifier for the synonym rule. + // If you do not specify a synonym rule ID when you create a rule, an identifier + // is created automatically by Elasticsearch. Id *string `json:"id,omitempty"` - // Synonyms Synonyms, in Solr format, that conform the synonym rule. See - // https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-graph-tokenfilter.html#_solr_synonyms_2 + // Synonyms The synonyms that conform the synonym rule in Solr format. Synonyms string `json:"synonyms"` } @@ -75,3 +76,13 @@ func NewSynonymRule() *SynonymRule { return r } + +// true + +type SynonymRuleVariant interface { + SynonymRuleCaster() *SynonymRule +} + +func (s *SynonymRule) SynonymRuleCaster() *SynonymRule { + return s +} diff --git a/typedapi/types/synonymruleread.go b/typedapi/types/synonymruleread.go index 8a08ba6865..bb76613961 100644 --- a/typedapi/types/synonymruleread.go +++ b/typedapi/types/synonymruleread.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // SynonymRuleRead type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/synonyms/_types/SynonymRule.ts#L38-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/synonyms/_types/SynonymRule.ts#L40-L49 type SynonymRuleRead struct { // Id Synonym Rule identifier Id string `json:"id"` @@ -75,3 +75,5 @@ func NewSynonymRuleRead() *SynonymRuleRead { return r } + +// false diff --git a/typedapi/types/synonymssetitem.go b/typedapi/types/synonymssetitem.go index 62bb280a5e..396fdd6ba5 100644 --- a/typedapi/types/synonymssetitem.go +++ b/typedapi/types/synonymssetitem.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // SynonymsSetItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/synonyms/get_synonyms_sets/SynonymsSetsGetResponse.ts#L30-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/synonyms/get_synonyms_sets/SynonymsSetsGetResponse.ts#L36-L45 type SynonymsSetItem struct { // Count Number of synonym rules that the synonym set contains Count int `json:"count"` @@ -86,3 +86,5 @@ func NewSynonymsSetItem() *SynonymsSetItem { return r } + +// false diff --git a/typedapi/types/synonymtokenfilter.go b/typedapi/types/synonymtokenfilter.go index 2441576bdf..5f7f4983ef 100644 --- a/typedapi/types/synonymtokenfilter.go +++ b/typedapi/types/synonymtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // SynonymTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L122-L132 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L121-L131 type SynonymTokenFilter struct { Expand *bool `json:"expand,omitempty"` Format *synonymformat.SynonymFormat `json:"format,omitempty"` @@ -192,3 +192,13 @@ func NewSynonymTokenFilter() *SynonymTokenFilter { return r } + +// true + +type SynonymTokenFilterVariant interface { + SynonymTokenFilterCaster() *SynonymTokenFilter +} + +func (s *SynonymTokenFilter) SynonymTokenFilterCaster() *SynonymTokenFilter { + return s +} diff --git a/typedapi/types/tablevaluescontainer.go b/typedapi/types/tablevaluescontainer.go index 89713a05d4..2cebcad185 100644 --- a/typedapi/types/tablevaluescontainer.go +++ b/typedapi/types/tablevaluescontainer.go @@ -16,23 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // TableValuesContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/esql/_types/TableValuesContainer.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/esql/_types/TableValuesContainer.ts#L22-L28 type TableValuesContainer struct { - Float64 [][]Float64 `json:"double,omitempty"` - Int [][]int `json:"integer,omitempty"` - Int64 [][]int64 `json:"long,omitempty"` - Keyword [][]string `json:"keyword,omitempty"` + AdditionalTableValuesContainerProperty map[string]json.RawMessage `json:"-"` + Float64 [][]Float64 `json:"double,omitempty"` + Int [][]int `json:"integer,omitempty"` + Int64 [][]int64 `json:"long,omitempty"` + Keyword [][]string `json:"keyword,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s TableValuesContainer) MarshalJSON() ([]byte, error) { + type opt TableValuesContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalTableValuesContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalTableValuesContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewTableValuesContainer returns a TableValuesContainer. 
func NewTableValuesContainer() *TableValuesContainer { - r := &TableValuesContainer{} + r := &TableValuesContainer{ + AdditionalTableValuesContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type TableValuesContainerVariant interface { + TableValuesContainerCaster() *TableValuesContainer +} + +func (s *TableValuesContainer) TableValuesContainerCaster() *TableValuesContainer { + return s +} diff --git a/typedapi/types/tablevaluesintegervalue.go b/typedapi/types/tablevaluesintegervalue.go index de895b4714..7c90007e5c 100644 --- a/typedapi/types/tablevaluesintegervalue.go +++ b/typedapi/types/tablevaluesintegervalue.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TableValuesIntegerValue type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/esql/_types/TableValuesContainer.ts#L30-L30 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/esql/_types/TableValuesContainer.ts#L30-L30 type TableValuesIntegerValue []int + +type TableValuesIntegerValueVariant interface { + TableValuesIntegerValueCaster() *TableValuesIntegerValue +} diff --git a/typedapi/types/tablevalueskeywordvalue.go b/typedapi/types/tablevalueskeywordvalue.go index 697f1ad2fa..75c9fb0ba2 100644 --- a/typedapi/types/tablevalueskeywordvalue.go +++ b/typedapi/types/tablevalueskeywordvalue.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TableValuesKeywordValue type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/esql/_types/TableValuesContainer.ts#L31-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/esql/_types/TableValuesContainer.ts#L31-L31 type TableValuesKeywordValue []string + +type TableValuesKeywordValueVariant interface { + TableValuesKeywordValueCaster() *TableValuesKeywordValue +} diff --git a/typedapi/types/tablevalueslongdouble.go b/typedapi/types/tablevalueslongdouble.go index e83a3501c1..adf063bafd 100644 --- a/typedapi/types/tablevalueslongdouble.go +++ b/typedapi/types/tablevalueslongdouble.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TableValuesLongDouble type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/esql/_types/TableValuesContainer.ts#L33-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/esql/_types/TableValuesContainer.ts#L33-L33 type TableValuesLongDouble []Float64 + +type TableValuesLongDoubleVariant interface { + TableValuesLongDoubleCaster() *TableValuesLongDouble +} diff --git a/typedapi/types/tablevalueslongvalue.go b/typedapi/types/tablevalueslongvalue.go index e36fea7d00..6f1564559a 100644 --- a/typedapi/types/tablevalueslongvalue.go +++ b/typedapi/types/tablevalueslongvalue.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TableValuesLongValue type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/esql/_types/TableValuesContainer.ts#L32-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/esql/_types/TableValuesContainer.ts#L32-L32 type TableValuesLongValue []int64 + +type TableValuesLongValueVariant interface { + TableValuesLongValueCaster() *TableValuesLongValue +} diff --git a/typedapi/types/targetmeanencodingpreprocessor.go b/typedapi/types/targetmeanencodingpreprocessor.go index 1b103b0331..d06967ef96 100644 --- a/typedapi/types/targetmeanencodingpreprocessor.go +++ b/typedapi/types/targetmeanencodingpreprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TargetMeanEncodingPreprocessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/types.ts#L49-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/types.ts#L49-L54 type TargetMeanEncodingPreprocessor struct { DefaultValue Float64 `json:"default_value"` FeatureName string `json:"feature_name"` @@ -110,8 +110,18 @@ func (s *TargetMeanEncodingPreprocessor) UnmarshalJSON(data []byte) error { // NewTargetMeanEncodingPreprocessor returns a TargetMeanEncodingPreprocessor. func NewTargetMeanEncodingPreprocessor() *TargetMeanEncodingPreprocessor { r := &TargetMeanEncodingPreprocessor{ - TargetMap: make(map[string]Float64, 0), + TargetMap: make(map[string]Float64), } return r } + +// true + +type TargetMeanEncodingPreprocessorVariant interface { + TargetMeanEncodingPreprocessorCaster() *TargetMeanEncodingPreprocessor +} + +func (s *TargetMeanEncodingPreprocessor) TargetMeanEncodingPreprocessorCaster() *TargetMeanEncodingPreprocessor { + return s +} diff --git a/typedapi/types/taskfailure.go b/typedapi/types/taskfailure.go index c0ebd04ab4..55f4b065d7 100644 --- a/typedapi/types/taskfailure.go +++ b/typedapi/types/taskfailure.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TaskFailure type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Errors.ts#L68-L73 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Errors.ts#L67-L72 type TaskFailure struct { NodeId string `json:"node_id"` Reason ErrorCause `json:"reason"` @@ -102,3 +102,5 @@ func NewTaskFailure() *TaskFailure { return r } + +// false diff --git a/typedapi/types/taskid.go b/typedapi/types/taskid.go index c869dc94c2..fd6f842e7a 100644 --- a/typedapi/types/taskid.go +++ b/typedapi/types/taskid.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // string // int // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L132-L132 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L136-L136 type TaskId any diff --git a/typedapi/types/taskinfo.go b/typedapi/types/taskinfo.go index e43b3a5b59..a3b0e0f2da 100644 --- a/typedapi/types/taskinfo.go +++ b/typedapi/types/taskinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,11 +31,21 @@ import ( // TaskInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/tasks/_types/TaskInfo.ts#L32-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/tasks/_types/TaskInfo.ts#L32-L58 type TaskInfo struct { - Action string `json:"action"` - Cancellable bool `json:"cancellable"` - Cancelled *bool `json:"cancelled,omitempty"` + Action string `json:"action"` + Cancellable bool `json:"cancellable"` + Cancelled *bool `json:"cancelled,omitempty"` + // Description Human readable text that identifies the particular request that the task is + // performing. + // For example, it might identify the search request being performed by a search + // task. + // Other kinds of tasks have different descriptions, like `_reindex` which has + // the source and the destination, or `_bulk` which just has the number of + // requests and the destination indices. + // Many requests will have only an empty description because more detailed + // information about the request is not easily available or particularly helpful + // in identifying the request. Description *string `json:"description,omitempty"` Headers map[string]string `json:"headers"` Id int64 `json:"id"` @@ -44,7 +54,13 @@ type TaskInfo struct { RunningTime Duration `json:"running_time,omitempty"` RunningTimeInNanos int64 `json:"running_time_in_nanos"` StartTimeInMillis int64 `json:"start_time_in_millis"` - // Status Task status information can vary wildly from task to task. + // Status The internal status of the task, which varies from task to task. + // The format also varies. + // While the goal is to keep the status for a particular task consistent from + // version to version, this is not always possible because sometimes the + // implementation changes. + // Fields might be removed from the status for a particular request so any + // parsing you do of the status might break in minor releases. 
Status json.RawMessage `json:"status,omitempty"` Type string `json:"type"` } @@ -189,8 +205,10 @@ func (s *TaskInfo) UnmarshalJSON(data []byte) error { // NewTaskInfo returns a TaskInfo. func NewTaskInfo() *TaskInfo { r := &TaskInfo{ - Headers: make(map[string]string, 0), + Headers: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/taskinfos.go b/typedapi/types/taskinfos.go index 4e4e193d35..23ae6110b7 100644 --- a/typedapi/types/taskinfos.go +++ b/typedapi/types/taskinfos.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // []TaskInfo // map[string]ParentTaskInfo // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/tasks/_types/TaskListResponseBase.ts#L40-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/tasks/_types/TaskListResponseBase.ts#L40-L43 type TaskInfos any diff --git a/typedapi/types/tasksrecord.go b/typedapi/types/tasksrecord.go index f077e3065c..f8db7f3b52 100644 --- a/typedapi/types/tasksrecord.go +++ b/typedapi/types/tasksrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TasksRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/tasks/types.ts#L22-L101 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/tasks/types.ts#L22-L101 type TasksRecord struct { // Action The task action. Action *string `json:"action,omitempty"` @@ -257,3 +257,5 @@ func NewTasksRecord() *TasksRecord { return r } + +// false diff --git a/typedapi/types/tdigest.go b/typedapi/types/tdigest.go index 7aa4477ff0..229612ad02 100644 --- a/typedapi/types/tdigest.go +++ b/typedapi/types/tdigest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TDigest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L223-L228 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L232-L237 type TDigest struct { // Compression Limits the maximum number of nodes used by the underlying TDigest algorithm // to `20 * compression`, enabling control of memory usage and approximation @@ -81,3 +81,13 @@ func NewTDigest() *TDigest { return r } + +// true + +type TDigestVariant interface { + TDigestCaster() *TDigest +} + +func (s *TDigest) TDigestCaster() *TDigest { + return s +} diff --git a/typedapi/types/tdigestpercentileranksaggregate.go b/typedapi/types/tdigestpercentileranksaggregate.go index 6d78c23dc7..42e3f9a888 100644 --- a/typedapi/types/tdigestpercentileranksaggregate.go +++ b/typedapi/types/tdigestpercentileranksaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TDigestPercentileRanksAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L175-L176 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L177-L178 type TDigestPercentileRanksAggregate struct { Meta Metadata `json:"meta,omitempty"` Values Percentiles `json:"values"` @@ -64,7 +64,7 @@ func (s *TDigestPercentileRanksAggregate) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) switch rawMsg[0] { case '{': - o := make(KeyedPercentiles, 0) + o := make(map[string]string, 0) if err := localDec.Decode(&o); err != nil { return fmt.Errorf("%s | %w", "Values", err) } @@ -88,3 +88,5 @@ func NewTDigestPercentileRanksAggregate() *TDigestPercentileRanksAggregate { return r } + +// false diff --git a/typedapi/types/tdigestpercentilesaggregate.go b/typedapi/types/tdigestpercentilesaggregate.go index d3407b67c8..5260bbf972 100644 --- a/typedapi/types/tdigestpercentilesaggregate.go +++ b/typedapi/types/tdigestpercentilesaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TDigestPercentilesAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L172-L173 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L174-L175 type TDigestPercentilesAggregate struct { Meta Metadata `json:"meta,omitempty"` Values Percentiles `json:"values"` @@ -64,7 +64,7 @@ func (s *TDigestPercentilesAggregate) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) switch rawMsg[0] { case '{': - o := make(KeyedPercentiles, 0) + o := make(map[string]string, 0) if err := localDec.Decode(&o); err != nil { return fmt.Errorf("%s | %w", "Values", err) } @@ -88,3 +88,5 @@ func NewTDigestPercentilesAggregate() *TDigestPercentilesAggregate { return r } + +// false diff --git a/typedapi/types/template.go b/typedapi/types/template.go index a1e0e25936..5fd778802b 100644 --- a/typedapi/types/template.go +++ b/typedapi/types/template.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // Template type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L33-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L33-L37 type Template struct { Aliases map[string]Alias `json:"aliases"` Mappings TypeMapping `json:"mappings"` @@ -32,8 +32,10 @@ type Template struct { // NewTemplate returns a Template. 
func NewTemplate() *Template { r := &Template{ - Aliases: make(map[string]Alias, 0), + Aliases: make(map[string]Alias), } return r } + +// false diff --git a/typedapi/types/templateconfig.go b/typedapi/types/templateconfig.go index 3f334ec043..2a6875ab4a 100644 --- a/typedapi/types/templateconfig.go +++ b/typedapi/types/templateconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,12 +31,12 @@ import ( // TemplateConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/msearch_template/types.ts#L28-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/msearch_template/types.ts#L28-L54 type TemplateConfig struct { // Explain If `true`, returns detailed information about score calculation as part of // each hit. Explain *bool `json:"explain,omitempty"` - // Id ID of the search template to use. If no source is specified, + // Id The ID of the search template to use. If no `source` is specified, // this parameter is required. Id *string `json:"id,omitempty"` // Params Key-value pairs used to replace Mustache variables in the template. @@ -46,7 +46,8 @@ type TemplateConfig struct { // Profile If `true`, the query execution is profiled. Profile *bool `json:"profile,omitempty"` // Source An inline search template. Supports the same parameters as the search API's - // request body. Also supports Mustache variables. If no id is specified, this + // request body. It also supports Mustache variables. If no `id` is specified, + // this // parameter is required. 
Source *string `json:"source,omitempty"` } @@ -127,8 +128,18 @@ func (s *TemplateConfig) UnmarshalJSON(data []byte) error { // NewTemplateConfig returns a TemplateConfig. func NewTemplateConfig() *TemplateConfig { r := &TemplateConfig{ - Params: make(map[string]json.RawMessage, 0), + Params: make(map[string]json.RawMessage), } return r } + +// true + +type TemplateConfigVariant interface { + TemplateConfigCaster() *TemplateConfig +} + +func (s *TemplateConfig) TemplateConfigCaster() *TemplateConfig { + return s +} diff --git a/typedapi/types/templatemapping.go b/typedapi/types/templatemapping.go index d2f9083ec7..5d3c23cfbf 100644 --- a/typedapi/types/templatemapping.go +++ b/typedapi/types/templatemapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TemplateMapping type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/TemplateMapping.ts#L27-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/TemplateMapping.ts#L27-L34 type TemplateMapping struct { Aliases map[string]Alias `json:"aliases"` IndexPatterns []string `json:"index_patterns"` @@ -111,9 +111,11 @@ func (s *TemplateMapping) UnmarshalJSON(data []byte) error { // NewTemplateMapping returns a TemplateMapping. 
func NewTemplateMapping() *TemplateMapping { r := &TemplateMapping{ - Aliases: make(map[string]Alias, 0), - Settings: make(map[string]json.RawMessage, 0), + Aliases: make(map[string]Alias), + Settings: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/templatesrecord.go b/typedapi/types/templatesrecord.go index c7091a771b..6deb4f92e8 100644 --- a/typedapi/types/templatesrecord.go +++ b/typedapi/types/templatesrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TemplatesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/templates/types.ts#L22-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/templates/types.ts#L22-L48 type TemplatesRecord struct { // ComposedOf The component templates that comprise the index template. ComposedOf *string `json:"composed_of,omitempty"` @@ -117,3 +117,5 @@ func NewTemplatesRecord() *TemplatesRecord { return r } + +// false diff --git a/typedapi/types/term.go b/typedapi/types/term.go index d7ce4a9e38..40f309e1d5 100644 --- a/typedapi/types/term.go +++ b/typedapi/types/term.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Term type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/termvectors/types.ts#L34-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/termvectors/types.ts#L34-L40 type Term struct { DocFreq *int `json:"doc_freq,omitempty"` Score *Float64 `json:"score,omitempty"` @@ -135,3 +135,5 @@ func NewTerm() *Term { return r } + +// false diff --git a/typedapi/types/terminateprocessor.go b/typedapi/types/terminateprocessor.go new file mode 100644 index 0000000000..763d4d5fa6 --- /dev/null +++ b/typedapi/types/terminateprocessor.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TerminateProcessor type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1523-L1523 +type TerminateProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` +} + +func (s *TerminateProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err 
:= dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil +} + +// NewTerminateProcessor returns a TerminateProcessor. +func NewTerminateProcessor() *TerminateProcessor { + r := &TerminateProcessor{} + + return r +} + +// true + +type TerminateProcessorVariant interface { + TerminateProcessorCaster() *TerminateProcessor +} + +func (s *TerminateProcessor) TerminateProcessorCaster() *TerminateProcessor { + return s +} diff --git a/typedapi/types/termquery.go b/typedapi/types/termquery.go index 98d981bfbb..773ed6325e 100644 --- a/typedapi/types/termquery.go +++ b/typedapi/types/termquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TermQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L219-L233 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L238-L255 type TermQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -127,3 +127,13 @@ func NewTermQuery() *TermQuery { return r } + +// true + +type TermQueryVariant interface { + TermQueryCaster() *TermQuery +} + +func (s *TermQuery) TermQueryCaster() *TermQuery { + return s +} diff --git a/typedapi/types/termrangequery.go b/typedapi/types/termrangequery.go index 886b642615..2166ff5726 100644 --- a/typedapi/types/termrangequery.go +++ b/typedapi/types/termrangequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // TermRangeQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L159-L159 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L174-L174 type TermRangeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -187,3 +187,13 @@ func NewTermRangeQuery() *TermRangeQuery { return r } + +// true + +type TermRangeQueryVariant interface { + TermRangeQueryCaster() *TermRangeQuery +} + +func (s *TermRangeQuery) TermRangeQueryCaster() *TermRangeQuery { + return s +} diff --git a/typedapi/types/termsaggregatebasedoubletermsbucket.go b/typedapi/types/termsaggregatebasedoubletermsbucket.go deleted file mode 100644 index 911398c36c..0000000000 --- a/typedapi/types/termsaggregatebasedoubletermsbucket.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// TermsAggregateBaseDoubleTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L379-L384 -type TermsAggregateBaseDoubleTermsBucket struct { - Buckets BucketsDoubleTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta Metadata `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` -} - -func (s *TermsAggregateBaseDoubleTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]DoubleTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := 
[]DoubleTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count_error_upper_bound": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) - } - s.DocCountErrorUpperBound = &value - case float64: - f := int64(v) - s.DocCountErrorUpperBound = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - case "sum_other_doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "SumOtherDocCount", err) - } - s.SumOtherDocCount = &value - case float64: - f := int64(v) - s.SumOtherDocCount = &f - } - - } - } - return nil -} - -// NewTermsAggregateBaseDoubleTermsBucket returns a TermsAggregateBaseDoubleTermsBucket. -func NewTermsAggregateBaseDoubleTermsBucket() *TermsAggregateBaseDoubleTermsBucket { - r := &TermsAggregateBaseDoubleTermsBucket{} - - return r -} diff --git a/typedapi/types/termsaggregatebaselongtermsbucket.go b/typedapi/types/termsaggregatebaselongtermsbucket.go deleted file mode 100644 index 506e83eacf..0000000000 --- a/typedapi/types/termsaggregatebaselongtermsbucket.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// TermsAggregateBaseLongTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L379-L384 -type TermsAggregateBaseLongTermsBucket struct { - Buckets BucketsLongTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta Metadata `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` -} - -func (s *TermsAggregateBaseLongTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]LongTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []LongTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count_error_upper_bound": - var 
tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) - } - s.DocCountErrorUpperBound = &value - case float64: - f := int64(v) - s.DocCountErrorUpperBound = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - case "sum_other_doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "SumOtherDocCount", err) - } - s.SumOtherDocCount = &value - case float64: - f := int64(v) - s.SumOtherDocCount = &f - } - - } - } - return nil -} - -// NewTermsAggregateBaseLongTermsBucket returns a TermsAggregateBaseLongTermsBucket. -func NewTermsAggregateBaseLongTermsBucket() *TermsAggregateBaseLongTermsBucket { - r := &TermsAggregateBaseLongTermsBucket{} - - return r -} diff --git a/typedapi/types/termsaggregatebasemultitermsbucket.go b/typedapi/types/termsaggregatebasemultitermsbucket.go deleted file mode 100644 index 7b63b1c6e1..0000000000 --- a/typedapi/types/termsaggregatebasemultitermsbucket.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// TermsAggregateBaseMultiTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L379-L384 -type TermsAggregateBaseMultiTermsBucket struct { - Buckets BucketsMultiTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta Metadata `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` -} - -func (s *TermsAggregateBaseMultiTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]MultiTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []MultiTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count_error_upper_bound": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) - } - s.DocCountErrorUpperBound = &value - case float64: - f := int64(v) - s.DocCountErrorUpperBound = &f - } - - case 
"meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - case "sum_other_doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "SumOtherDocCount", err) - } - s.SumOtherDocCount = &value - case float64: - f := int64(v) - s.SumOtherDocCount = &f - } - - } - } - return nil -} - -// NewTermsAggregateBaseMultiTermsBucket returns a TermsAggregateBaseMultiTermsBucket. -func NewTermsAggregateBaseMultiTermsBucket() *TermsAggregateBaseMultiTermsBucket { - r := &TermsAggregateBaseMultiTermsBucket{} - - return r -} diff --git a/typedapi/types/termsaggregatebasestringtermsbucket.go b/typedapi/types/termsaggregatebasestringtermsbucket.go deleted file mode 100644 index edd7ec635f..0000000000 --- a/typedapi/types/termsaggregatebasestringtermsbucket.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// TermsAggregateBaseStringTermsBucket type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L379-L384 -type TermsAggregateBaseStringTermsBucket struct { - Buckets BucketsStringTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta Metadata `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` -} - -func (s *TermsAggregateBaseStringTermsBucket) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]StringTermsBucket, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []StringTermsBucket{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count_error_upper_bound": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) - } - s.DocCountErrorUpperBound = &value - case float64: - f := int64(v) - s.DocCountErrorUpperBound = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - case "sum_other_doc_count": - var tmp any - dec.Decode(&tmp) - switch 
v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "SumOtherDocCount", err) - } - s.SumOtherDocCount = &value - case float64: - f := int64(v) - s.SumOtherDocCount = &f - } - - } - } - return nil -} - -// NewTermsAggregateBaseStringTermsBucket returns a TermsAggregateBaseStringTermsBucket. -func NewTermsAggregateBaseStringTermsBucket() *TermsAggregateBaseStringTermsBucket { - r := &TermsAggregateBaseStringTermsBucket{} - - return r -} diff --git a/typedapi/types/termsaggregatebasevoid.go b/typedapi/types/termsaggregatebasevoid.go deleted file mode 100644 index ddc4164c0c..0000000000 --- a/typedapi/types/termsaggregatebasevoid.go +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// TermsAggregateBaseVoid type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L379-L384 -type TermsAggregateBaseVoid struct { - Buckets BucketsVoid `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta Metadata `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` -} - -func (s *TermsAggregateBaseVoid) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "buckets": - - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := make(map[string]any, 0) - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - case '[': - o := []any{} - if err := localDec.Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Buckets", err) - } - s.Buckets = o - } - - case "doc_count_error_upper_bound": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) - } - s.DocCountErrorUpperBound = &value - case float64: - f := int64(v) - s.DocCountErrorUpperBound = &f - } - - case "meta": - if err := dec.Decode(&s.Meta); err != nil { - return fmt.Errorf("%s | %w", "Meta", err) - } - - case "sum_other_doc_count": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "SumOtherDocCount", err) - } - s.SumOtherDocCount = &value - case float64: - f := int64(v) - s.SumOtherDocCount = &f - } - - } - } - return nil -} - -// NewTermsAggregateBaseVoid returns a 
TermsAggregateBaseVoid. -func NewTermsAggregateBaseVoid() *TermsAggregateBaseVoid { - r := &TermsAggregateBaseVoid{} - - return r -} diff --git a/typedapi/types/termsaggregation.go b/typedapi/types/termsaggregation.go index dbb4211202..d45f1fd9f8 100644 --- a/typedapi/types/termsaggregation.go +++ b/typedapi/types/termsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -36,7 +36,7 @@ import ( // TermsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L912-L977 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L963-L1031 type TermsAggregation struct { // CollectMode Determines how child aggregations should be calculated: breadth-first or // depth-first. @@ -326,3 +326,13 @@ func NewTermsAggregation() *TermsAggregation { return r } + +// true + +type TermsAggregationVariant interface { + TermsAggregationCaster() *TermsAggregation +} + +func (s *TermsAggregation) TermsAggregationCaster() *TermsAggregation { + return s +} diff --git a/typedapi/types/termsexclude.go b/typedapi/types/termsexclude.go index 2be14e1201..d15acabc35 100644 --- a/typedapi/types/termsexclude.go +++ b/typedapi/types/termsexclude.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TermsExclude type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L1008-L1009 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1077-L1078 type TermsExclude []string + +type TermsExcludeVariant interface { + TermsExcludeCaster() *TermsExclude +} diff --git a/typedapi/types/termsgrouping.go b/typedapi/types/termsgrouping.go index b71afaeffe..9d20872f82 100644 --- a/typedapi/types/termsgrouping.go +++ b/typedapi/types/termsgrouping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TermsGrouping type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/rollup/_types/Groupings.ts#L75-L82 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/rollup/_types/Groupings.ts#L75-L82 type TermsGrouping struct { // Fields The set of fields that you wish to collect terms for. // This array can contain fields that are both keyword and numerics. @@ -80,3 +80,13 @@ func NewTermsGrouping() *TermsGrouping { return r } + +// true + +type TermsGroupingVariant interface { + TermsGroupingCaster() *TermsGrouping +} + +func (s *TermsGrouping) TermsGroupingCaster() *TermsGrouping { + return s +} diff --git a/typedapi/types/termsinclude.go b/typedapi/types/termsinclude.go index 05ab62107f..2a9526117d 100644 --- a/typedapi/types/termsinclude.go +++ b/typedapi/types/termsinclude.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,5 +26,9 @@ package types // []string // TermsPartition // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L1005-L1006 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1074-L1075 type TermsInclude any + +type TermsIncludeVariant interface { + TermsIncludeCaster() *TermsInclude +} diff --git a/typedapi/types/termslookup.go b/typedapi/types/termslookup.go index a30bd4a3b0..d59d28c836 100644 --- a/typedapi/types/termslookup.go +++ b/typedapi/types/termslookup.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TermsLookup type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L247-L252 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L270-L275 type TermsLookup struct { Id string `json:"id"` Index string `json:"index"` @@ -84,3 +84,13 @@ func NewTermsLookup() *TermsLookup { return r } + +// true + +type TermsLookupVariant interface { + TermsLookupCaster() *TermsLookup +} + +func (s *TermsLookup) TermsLookupCaster() *TermsLookup { + return s +} diff --git a/typedapi/types/termspartition.go b/typedapi/types/termspartition.go index db4e4d4039..7851901552 100644 --- a/typedapi/types/termspartition.go +++ b/typedapi/types/termspartition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TermsPartition type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L1011-L1020 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1080-L1089 type TermsPartition struct { // NumPartitions The number of partitions. 
NumPartitions int64 `json:"num_partitions"` @@ -95,3 +95,13 @@ func NewTermsPartition() *TermsPartition { return r } + +// true + +type TermsPartitionVariant interface { + TermsPartitionCaster() *TermsPartition +} + +func (s *TermsPartition) TermsPartitionCaster() *TermsPartition { + return s +} diff --git a/typedapi/types/termsquery.go b/typedapi/types/termsquery.go index d5785a3d28..e8f35759bb 100644 --- a/typedapi/types/termsquery.go +++ b/typedapi/types/termsquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TermsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L235-L240 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L257-L263 type TermsQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -136,8 +136,18 @@ func (s TermsQuery) MarshalJSON() ([]byte, error) { // NewTermsQuery returns a TermsQuery. func NewTermsQuery() *TermsQuery { r := &TermsQuery{ - TermsQuery: make(map[string]TermsQueryField, 0), + TermsQuery: make(map[string]TermsQueryField), } return r } + +// true + +type TermsQueryVariant interface { + TermsQueryCaster() *TermsQuery +} + +func (s *TermsQuery) TermsQueryCaster() *TermsQuery { + return s +} diff --git a/typedapi/types/termsqueryfield.go b/typedapi/types/termsqueryfield.go index b738a60bcf..cf113d0d4b 100644 --- a/typedapi/types/termsqueryfield.go +++ b/typedapi/types/termsqueryfield.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // []FieldValue // TermsLookup // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L242-L245 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L265-L268 type TermsQueryField any + +type TermsQueryFieldVariant interface { + TermsQueryFieldCaster() *TermsQueryField +} diff --git a/typedapi/types/termssetquery.go b/typedapi/types/termssetquery.go index 80d593c7fd..d9267b1fe3 100644 --- a/typedapi/types/termssetquery.go +++ b/typedapi/types/termssetquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TermsSetQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L254-L267 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L277-L299 type TermsSetQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -39,6 +39,9 @@ type TermsSetQuery struct { // A boost value between 0 and 1.0 decreases the relevance score. // A value greater than 1.0 increases the relevance score. 
Boost *float32 `json:"boost,omitempty"` + // MinimumShouldMatch Specification describing number of matching terms required to return a + // document. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` // MinimumShouldMatchField Numeric field containing the number of matching terms required to return a // document. MinimumShouldMatchField *string `json:"minimum_should_match_field,omitempty"` @@ -81,6 +84,11 @@ func (s *TermsSetQuery) UnmarshalJSON(data []byte) error { s.Boost = &f } + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return fmt.Errorf("%s | %w", "MinimumShouldMatch", err) + } + case "minimum_should_match_field": if err := dec.Decode(&s.MinimumShouldMatchField); err != nil { return fmt.Errorf("%s | %w", "MinimumShouldMatchField", err) @@ -119,3 +127,13 @@ func NewTermsSetQuery() *TermsSetQuery { return r } + +// true + +type TermsSetQueryVariant interface { + TermsSetQueryCaster() *TermsSetQuery +} + +func (s *TermsSetQuery) TermsSetQueryCaster() *TermsSetQuery { + return s +} diff --git a/typedapi/types/termsuggest.go b/typedapi/types/termsuggest.go index 1c13b4c178..80f5196936 100644 --- a/typedapi/types/termsuggest.go +++ b/typedapi/types/termsuggest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TermSuggest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L64-L69 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L64-L69 type TermSuggest struct { Length int `json:"length"` Offset int `json:"offset"` @@ -125,3 +125,5 @@ func NewTermSuggest() *TermSuggest { return r } + +// false diff --git a/typedapi/types/termsuggester.go b/typedapi/types/termsuggester.go index 7d066fb43a..4093963ff6 100644 --- a/typedapi/types/termsuggester.go +++ b/typedapi/types/termsuggester.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -35,7 +35,7 @@ import ( // TermSuggester type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L506-L568 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L506-L568 type TermSuggester struct { // Analyzer The analyzer to analyze the suggest text with. // Defaults to the search analyzer of the suggest field. 
@@ -300,3 +300,13 @@ func NewTermSuggester() *TermSuggester { return r } + +// true + +type TermSuggesterVariant interface { + TermSuggesterCaster() *TermSuggester +} + +func (s *TermSuggester) TermSuggesterCaster() *TermSuggester { + return s +} diff --git a/typedapi/types/termsuggestoption.go b/typedapi/types/termsuggestoption.go index 754b21c0ec..18236bb43d 100644 --- a/typedapi/types/termsuggestoption.go +++ b/typedapi/types/termsuggestoption.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TermSuggestOption type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/suggester.ts#L93-L99 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/suggester.ts#L93-L99 type TermSuggestOption struct { CollateMatch *bool `json:"collate_match,omitempty"` Freq int64 `json:"freq"` @@ -135,3 +135,5 @@ func NewTermSuggestOption() *TermSuggestOption { return r } + +// false diff --git a/typedapi/types/termvector.go b/typedapi/types/termvector.go index f0517e0453..9c9788dd32 100644 --- a/typedapi/types/termvector.go +++ b/typedapi/types/termvector.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TermVector type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/termvectors/types.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/termvectors/types.ts#L23-L26 type TermVector struct { FieldStatistics *FieldStatistics `json:"field_statistics,omitempty"` Terms map[string]Term `json:"terms"` @@ -31,8 +31,10 @@ type TermVector struct { // NewTermVector returns a TermVector. func NewTermVector() *TermVector { r := &TermVector{ - Terms: make(map[string]Term, 0), + Terms: make(map[string]Term), } return r } + +// false diff --git a/typedapi/types/termvectorsfilter.go b/typedapi/types/termvectorsfilter.go index 659f2bc02d..ca9ede4c85 100644 --- a/typedapi/types/termvectorsfilter.go +++ b/typedapi/types/termvectorsfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,15 +31,15 @@ import ( // TermVectorsFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/termvectors/types.ts#L49-L86 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/termvectors/types.ts#L49-L86 type TermVectorsFilter struct { // MaxDocFreq Ignore words which occur in more than this many docs. // Defaults to unbounded. MaxDocFreq *int `json:"max_doc_freq,omitempty"` - // MaxNumTerms Maximum number of terms that must be returned per field. + // MaxNumTerms The maximum number of terms that must be returned per field. 
MaxNumTerms *int `json:"max_num_terms,omitempty"` // MaxTermFreq Ignore words with more than this frequency in the source doc. - // Defaults to unbounded. + // It defaults to unbounded. MaxTermFreq *int `json:"max_term_freq,omitempty"` // MaxWordLength The maximum word length above which words will be ignored. // Defaults to unbounded. @@ -190,3 +190,13 @@ func NewTermVectorsFilter() *TermVectorsFilter { return r } + +// true + +type TermVectorsFilterVariant interface { + TermVectorsFilterCaster() *TermVectorsFilter +} + +func (s *TermVectorsFilter) TermVectorsFilterCaster() *TermVectorsFilter { + return s +} diff --git a/typedapi/types/termvectorsresult.go b/typedapi/types/termvectorsresult.go index 9a39f3b2ec..69a2277518 100644 --- a/typedapi/types/termvectorsresult.go +++ b/typedapi/types/termvectorsresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TermVectorsResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/mtermvectors/types.ts#L96-L104 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/mtermvectors/types.ts#L96-L104 type TermVectorsResult struct { Error *ErrorCause `json:"error,omitempty"` Found *bool `json:"found,omitempty"` @@ -122,8 +122,10 @@ func (s *TermVectorsResult) UnmarshalJSON(data []byte) error { // NewTermVectorsResult returns a TermVectorsResult. 
func NewTermVectorsResult() *TermVectorsResult { r := &TermVectorsResult{ - TermVectors: make(map[string]TermVector, 0), + TermVectors: make(map[string]TermVector), } return r } + +// false diff --git a/typedapi/types/termvectorstoken.go b/typedapi/types/termvectorstoken.go index 823b5ff82e..ea44e526bf 100644 --- a/typedapi/types/termvectorstoken.go +++ b/typedapi/types/termvectorstoken.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TermVectorsToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/termvectors/types.ts#L42-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/termvectors/types.ts#L42-L47 type TermVectorsToken struct { EndOffset *int `json:"end_offset,omitempty"` Payload *string `json:"payload,omitempty"` @@ -125,3 +125,5 @@ func NewTermVectorsToken() *TermVectorsToken { return r } + +// false diff --git a/typedapi/types/testpopulation.go b/typedapi/types/testpopulation.go index cca23074da..dcb027f6ce 100644 --- a/typedapi/types/testpopulation.go +++ b/typedapi/types/testpopulation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TestPopulation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L310-L320 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L319-L329 type TestPopulation struct { // Field The field to aggregate. Field string `json:"field"` @@ -80,3 +80,13 @@ func NewTestPopulation() *TestPopulation { return r } + +// true + +type TestPopulationVariant interface { + TestPopulationCaster() *TestPopulation +} + +func (s *TestPopulation) TestPopulationCaster() *TestPopulation { + return s +} diff --git a/typedapi/types/textclassificationinferenceoptions.go b/typedapi/types/textclassificationinferenceoptions.go index 4b152a5c73..117d128557 100644 --- a/typedapi/types/textclassificationinferenceoptions.go +++ b/typedapi/types/textclassificationinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TextClassificationInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L189-L199 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L173-L183 type TextClassificationInferenceOptions struct { // ClassificationLabels Classification labels to apply other than the stored labels. 
Must have the // same deminsions as the default configured labels @@ -109,3 +109,13 @@ func NewTextClassificationInferenceOptions() *TextClassificationInferenceOptions return r } + +// true + +type TextClassificationInferenceOptionsVariant interface { + TextClassificationInferenceOptionsCaster() *TextClassificationInferenceOptions +} + +func (s *TextClassificationInferenceOptions) TextClassificationInferenceOptionsCaster() *TextClassificationInferenceOptions { + return s +} diff --git a/typedapi/types/textclassificationinferenceupdateoptions.go b/typedapi/types/textclassificationinferenceupdateoptions.go index bb44dfd372..93533e335d 100644 --- a/typedapi/types/textclassificationinferenceupdateoptions.go +++ b/typedapi/types/textclassificationinferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TextClassificationInferenceUpdateOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L363-L372 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L351-L360 type TextClassificationInferenceUpdateOptions struct { // ClassificationLabels Classification labels to apply other than the stored labels. 
Must have the // same deminsions as the default configured labels @@ -109,3 +109,13 @@ func NewTextClassificationInferenceUpdateOptions() *TextClassificationInferenceU return r } + +// true + +type TextClassificationInferenceUpdateOptionsVariant interface { + TextClassificationInferenceUpdateOptionsCaster() *TextClassificationInferenceUpdateOptions +} + +func (s *TextClassificationInferenceUpdateOptions) TextClassificationInferenceUpdateOptionsCaster() *TextClassificationInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/textembedding.go b/typedapi/types/textembedding.go index ce77fca506..712263dc3c 100644 --- a/typedapi/types/textembedding.go +++ b/typedapi/types/textembedding.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TextEmbedding type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Knn.ts#L74-L77 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Knn.ts#L94-L97 type TextEmbedding struct { ModelId string `json:"model_id"` ModelText string `json:"model_text"` @@ -87,3 +87,13 @@ func NewTextEmbedding() *TextEmbedding { return r } + +// true + +type TextEmbeddingVariant interface { + TextEmbeddingCaster() *TextEmbedding +} + +func (s *TextEmbedding) TextEmbeddingCaster() *TextEmbedding { + return s +} diff --git a/typedapi/types/textembeddingbyteresult.go b/typedapi/types/textembeddingbyteresult.go index e1fcc887e2..ace2c54221 100644 --- a/typedapi/types/textembeddingbyteresult.go +++ b/typedapi/types/textembeddingbyteresult.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TextEmbeddingByteResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/_types/Results.ts#L46-L51 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/_types/Results.ts#L53-L58 type TextEmbeddingByteResult struct { Embedding []byte `json:"embedding"` } @@ -66,3 +66,5 @@ func NewTextEmbeddingByteResult() *TextEmbeddingByteResult { return r } + +// false diff --git a/typedapi/types/textembeddinginferenceoptions.go b/typedapi/types/textembeddinginferenceoptions.go index da4718df3f..73dd882f0e 100644 --- a/typedapi/types/textembeddinginferenceoptions.go +++ b/typedapi/types/textembeddinginferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TextEmbeddingInferenceOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L237-L245 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L221-L231 type TextEmbeddingInferenceOptions struct { // EmbeddingSize The number of dimensions in the embedding output EmbeddingSize *int `json:"embedding_size,omitempty"` @@ -40,6 +40,7 @@ type TextEmbeddingInferenceOptions struct { ResultsField *string `json:"results_field,omitempty"` // Tokenization The tokenization options Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` + Vocabulary Vocabulary `json:"vocabulary"` } func (s *TextEmbeddingInferenceOptions) UnmarshalJSON(data []byte) error { @@ -90,6 +91,11 @@ func (s *TextEmbeddingInferenceOptions) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Tokenization", err) } + case "vocabulary": + if err := dec.Decode(&s.Vocabulary); err != nil { + return fmt.Errorf("%s | %w", "Vocabulary", err) + } + } } return nil @@ -101,3 +107,13 @@ func NewTextEmbeddingInferenceOptions() *TextEmbeddingInferenceOptions { return r } + +// true + +type TextEmbeddingInferenceOptionsVariant interface { + TextEmbeddingInferenceOptionsCaster() *TextEmbeddingInferenceOptions +} + +func (s *TextEmbeddingInferenceOptions) TextEmbeddingInferenceOptionsCaster() *TextEmbeddingInferenceOptions { + return s +} diff --git a/typedapi/types/textembeddinginferenceupdateoptions.go b/typedapi/types/textembeddinginferenceupdateoptions.go index 28b8e67c36..d5283a991c 100644 --- a/typedapi/types/textembeddinginferenceupdateoptions.go +++ b/typedapi/types/textembeddinginferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TextEmbeddingInferenceUpdateOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L392-L396 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L380-L384 type TextEmbeddingInferenceUpdateOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. @@ -82,3 +82,13 @@ func NewTextEmbeddingInferenceUpdateOptions() *TextEmbeddingInferenceUpdateOptio return r } + +// true + +type TextEmbeddingInferenceUpdateOptionsVariant interface { + TextEmbeddingInferenceUpdateOptionsCaster() *TextEmbeddingInferenceUpdateOptions +} + +func (s *TextEmbeddingInferenceUpdateOptions) TextEmbeddingInferenceUpdateOptionsCaster() *TextEmbeddingInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/textembeddingresult.go b/typedapi/types/textembeddingresult.go index 707fc92a8e..b5fe59d1e9 100644 --- a/typedapi/types/textembeddingresult.go +++ b/typedapi/types/textembeddingresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TextEmbeddingResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/inference/_types/Results.ts#L53-L58 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/_types/Results.ts#L60-L65 type TextEmbeddingResult struct { Embedding []float32 `json:"embedding"` } @@ -66,3 +66,5 @@ func NewTextEmbeddingResult() *TextEmbeddingResult { return r } + +// false diff --git a/typedapi/types/textexpansioninferenceoptions.go b/typedapi/types/textexpansioninferenceoptions.go index 248e6c9345..6c70e36dc9 100644 --- a/typedapi/types/textexpansioninferenceoptions.go +++ b/typedapi/types/textexpansioninferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,13 +31,14 @@ import ( // TextExpansionInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L247-L253 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L233-L240 type TextExpansionInferenceOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. 
ResultsField *string `json:"results_field,omitempty"` // Tokenization The tokenization options Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` + Vocabulary Vocabulary `json:"vocabulary"` } func (s *TextExpansionInferenceOptions) UnmarshalJSON(data []byte) error { @@ -72,6 +73,11 @@ func (s *TextExpansionInferenceOptions) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Tokenization", err) } + case "vocabulary": + if err := dec.Decode(&s.Vocabulary); err != nil { + return fmt.Errorf("%s | %w", "Vocabulary", err) + } + } } return nil @@ -83,3 +89,13 @@ func NewTextExpansionInferenceOptions() *TextExpansionInferenceOptions { return r } + +// true + +type TextExpansionInferenceOptionsVariant interface { + TextExpansionInferenceOptionsCaster() *TextExpansionInferenceOptions +} + +func (s *TextExpansionInferenceOptions) TextExpansionInferenceOptionsCaster() *TextExpansionInferenceOptions { + return s +} diff --git a/typedapi/types/textexpansioninferenceupdateoptions.go b/typedapi/types/textexpansioninferenceupdateoptions.go index 32fb30a005..092ac170d1 100644 --- a/typedapi/types/textexpansioninferenceupdateoptions.go +++ b/typedapi/types/textexpansioninferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TextExpansionInferenceUpdateOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L398-L402 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L386-L390 type TextExpansionInferenceUpdateOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. @@ -82,3 +82,13 @@ func NewTextExpansionInferenceUpdateOptions() *TextExpansionInferenceUpdateOptio return r } + +// true + +type TextExpansionInferenceUpdateOptionsVariant interface { + TextExpansionInferenceUpdateOptionsCaster() *TextExpansionInferenceUpdateOptions +} + +func (s *TextExpansionInferenceUpdateOptions) TextExpansionInferenceUpdateOptionsCaster() *TextExpansionInferenceUpdateOptions { + return s +} diff --git a/typedapi/types/textexpansionquery.go b/typedapi/types/textexpansionquery.go index 75ae56b234..bbfc43b87d 100644 --- a/typedapi/types/textexpansionquery.go +++ b/typedapi/types/textexpansionquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TextExpansionQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/TextExpansionQuery.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/TextExpansionQuery.ts#L23-L36 type TextExpansionQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -131,3 +131,13 @@ func NewTextExpansionQuery() *TextExpansionQuery { return r } + +// true + +type TextExpansionQueryVariant interface { + TextExpansionQueryCaster() *TextExpansionQuery +} + +func (s *TextExpansionQuery) TextExpansionQueryCaster() *TextExpansionQuery { + return s +} diff --git a/typedapi/types/textindexprefixes.go b/typedapi/types/textindexprefixes.go index ea1ac377fc..738332ba70 100644 --- a/typedapi/types/textindexprefixes.go +++ b/typedapi/types/textindexprefixes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TextIndexPrefixes type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L264-L267 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L281-L284 type TextIndexPrefixes struct { MaxChars int `json:"max_chars"` MinChars int `json:"min_chars"` @@ -95,3 +95,13 @@ func NewTextIndexPrefixes() *TextIndexPrefixes { return r } + +// true + +type TextIndexPrefixesVariant interface { + TextIndexPrefixesCaster() *TextIndexPrefixes +} + +func (s *TextIndexPrefixes) TextIndexPrefixesCaster() *TextIndexPrefixes { + return s +} diff --git a/typedapi/types/textproperty.go b/typedapi/types/textproperty.go index 75bdea0617..7d42446041 100644 --- a/typedapi/types/textproperty.go +++ b/typedapi/types/textproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" ) // TextProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L269-L285 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L286-L303 type TextProperty struct { Analyzer *string `json:"analyzer,omitempty"` Boost *Float64 `json:"boost,omitempty"` @@ -51,16 +52,17 @@ type TextProperty struct { IndexPhrases *bool `json:"index_phrases,omitempty"` IndexPrefixes *TextIndexPrefixes `json:"index_prefixes,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Norms *bool `json:"norms,omitempty"` - PositionIncrementGap *int `json:"position_increment_gap,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - SearchAnalyzer *string `json:"search_analyzer,omitempty"` - SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Norms *bool `json:"norms,omitempty"` + PositionIncrementGap *int `json:"position_increment_gap,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SearchAnalyzer *string `json:"search_analyzer,omitempty"` + SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` + Similarity *string `json:"similarity,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` + Type string `json:"type,omitempty"` } func (s *TextProperty) UnmarshalJSON(data []byte) error { @@ -179,301 +181,313 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { 
- return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -590,301 +604,313 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -940,6 +966,11 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "term_vector": if err := dec.Decode(&s.TermVector); err != nil { return fmt.Errorf("%s | %w", "TermVector", err) @@ -980,6 +1011,7 @@ func (s TextProperty) MarshalJSON() ([]byte, error) { SearchQuoteAnalyzer: s.SearchQuoteAnalyzer, Similarity: s.Similarity, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TermVector: s.TermVector, Type: s.Type, } @@ -992,10 +1024,20 @@ func (s TextProperty) MarshalJSON() ([]byte, error) { // 
NewTextProperty returns a TextProperty. func NewTextProperty() *TextProperty { r := &TextProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type TextPropertyVariant interface { + TextPropertyCaster() *TextProperty +} + +func (s *TextProperty) TextPropertyCaster() *TextProperty { + return s +} diff --git a/typedapi/types/textsimilarityreranker.go b/typedapi/types/textsimilarityreranker.go new file mode 100644 index 0000000000..650b99bbd5 --- /dev/null +++ b/typedapi/types/textsimilarityreranker.go @@ -0,0 +1,180 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TextSimilarityReranker type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Retriever.ts#L93-L104 +type TextSimilarityReranker struct { + // Field The document field to be used for text similarity comparisons. This field + // should contain the text that will be evaluated against the inference_text + Field *string `json:"field,omitempty"` + // Filter Query to filter the documents that can match. + Filter []Query `json:"filter,omitempty"` + // InferenceId Unique identifier of the inference endpoint created using the inference API. + InferenceId *string `json:"inference_id,omitempty"` + // InferenceText The text snippet used as the basis for similarity comparison + InferenceText *string `json:"inference_text,omitempty"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are not + // included in the top documents. + MinScore *float32 `json:"min_score,omitempty"` + // RankWindowSize This value determines how many documents we will consider from the nested + // retriever. + RankWindowSize *int `json:"rank_window_size,omitempty"` + // Retriever The nested retriever which will produce the first-level results, that will + // later be used for reranking. 
+ Retriever RetrieverContainer `json:"retriever"` +} + +func (s *TextSimilarityReranker) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = &o + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "inference_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InferenceId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InferenceId = &o + + case "inference_text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InferenceText", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InferenceText = &o + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + + case "rank_window_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return 
fmt.Errorf("%s | %w", "RankWindowSize", err) + } + s.RankWindowSize = &value + case float64: + f := int(v) + s.RankWindowSize = &f + } + + case "retriever": + if err := dec.Decode(&s.Retriever); err != nil { + return fmt.Errorf("%s | %w", "Retriever", err) + } + + } + } + return nil +} + +// NewTextSimilarityReranker returns a TextSimilarityReranker. +func NewTextSimilarityReranker() *TextSimilarityReranker { + r := &TextSimilarityReranker{} + + return r +} + +// true + +type TextSimilarityRerankerVariant interface { + TextSimilarityRerankerCaster() *TextSimilarityReranker +} + +func (s *TextSimilarityReranker) TextSimilarityRerankerCaster() *TextSimilarityReranker { + return s +} diff --git a/typedapi/types/texttoanalyze.go b/typedapi/types/texttoanalyze.go index 1eb6253c95..c90109a1a1 100644 --- a/typedapi/types/texttoanalyze.go +++ b/typedapi/types/texttoanalyze.go @@ -16,11 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TextToAnalyze type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/analyze/types.ts#L69-L69 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/analyze/types.ts#L69-L69 type TextToAnalyze []string + +type TextToAnalyzeVariant interface { + TextToAnalyzeCaster() *TextToAnalyze +} diff --git a/typedapi/types/thaianalyzer.go b/typedapi/types/thaianalyzer.go new file mode 100644 index 0000000000..d4ad1cd9d7 --- /dev/null +++ b/typedapi/types/thaianalyzer.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ThaiAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L297-L301 +type ThaiAnalyzer struct { + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ThaiAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ThaiAnalyzer) MarshalJSON() ([]byte, error) { + type innerThaiAnalyzer ThaiAnalyzer + tmp := innerThaiAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "thai" + + return json.Marshal(tmp) +} + +// NewThaiAnalyzer returns a ThaiAnalyzer. 
+func NewThaiAnalyzer() *ThaiAnalyzer { + r := &ThaiAnalyzer{} + + return r +} + +// true + +type ThaiAnalyzerVariant interface { + ThaiAnalyzerCaster() *ThaiAnalyzer +} + +func (s *ThaiAnalyzer) ThaiAnalyzerCaster() *ThaiAnalyzer { + return s +} diff --git a/typedapi/types/thaitokenizer.go b/typedapi/types/thaitokenizer.go new file mode 100644 index 0000000000..b003c794c4 --- /dev/null +++ b/typedapi/types/thaitokenizer.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ThaiTokenizer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L126-L128 +type ThaiTokenizer struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *ThaiTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ThaiTokenizer) MarshalJSON() ([]byte, error) { + type innerThaiTokenizer ThaiTokenizer + tmp := innerThaiTokenizer{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "thai" + + return json.Marshal(tmp) +} + +// NewThaiTokenizer returns a ThaiTokenizer. +func NewThaiTokenizer() *ThaiTokenizer { + r := &ThaiTokenizer{} + + return r +} + +// true + +type ThaiTokenizerVariant interface { + ThaiTokenizerCaster() *ThaiTokenizer +} + +func (s *ThaiTokenizer) ThaiTokenizerCaster() *ThaiTokenizer { + return s +} diff --git a/typedapi/types/threadcount.go b/typedapi/types/threadcount.go index 4cdcd1fc62..e3a10c6261 100644 --- a/typedapi/types/threadcount.go +++ b/typedapi/types/threadcount.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ThreadCount type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L1004-L1029 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L1075-L1100 type ThreadCount struct { // Active Number of active threads in the thread pool. Active *int64 `json:"active,omitempty"` @@ -163,3 +163,5 @@ func NewThreadCount() *ThreadCount { return r } + +// false diff --git a/typedapi/types/threadpoolrecord.go b/typedapi/types/threadpoolrecord.go index 7b5b6398b6..b3914c1dde 100644 --- a/typedapi/types/threadpoolrecord.go +++ b/typedapi/types/threadpoolrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ThreadPoolRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/thread_pool/types.ts#L22-L124 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/thread_pool/types.ts#L22-L124 type ThreadPoolRecord struct { // Active The number of active threads in the current thread pool. Active *string `json:"active,omitempty"` @@ -336,3 +336,5 @@ func NewThreadPoolRecord() *ThreadPoolRecord { return r } + +// false diff --git a/typedapi/types/throttlestate.go b/typedapi/types/throttlestate.go index a0c779aa32..51ae6cc2a8 100644 --- a/typedapi/types/throttlestate.go +++ b/typedapi/types/throttlestate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ThrottleState type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Action.ts#L126-L129 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Action.ts#L120-L123 type ThrottleState struct { Reason string `json:"reason"` Timestamp DateTime `json:"timestamp"` @@ -80,3 +80,13 @@ func NewThrottleState() *ThrottleState { return r } + +// true + +type ThrottleStateVariant interface { + ThrottleStateCaster() *ThrottleState +} + +func (s *ThrottleState) ThrottleStateCaster() *ThrottleState { + return s +} diff --git a/typedapi/types/timehttphistogram.go b/typedapi/types/timehttphistogram.go new file mode 100644 index 0000000000..3f924a1b4b --- /dev/null +++ b/typedapi/types/timehttphistogram.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TimeHttpHistogram type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L708-L712 +type TimeHttpHistogram struct { + Count int64 `json:"count"` + GeMillis *int64 `json:"ge_millis,omitempty"` + LtMillis *int64 `json:"lt_millis,omitempty"` +} + +func (s *TimeHttpHistogram) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "ge_millis": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "GeMillis", err) + } + s.GeMillis = &value + case float64: + f := int64(v) + s.GeMillis = &f + } + + case "lt_millis": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LtMillis", err) + } + s.LtMillis = &value + case float64: + f := int64(v) + s.LtMillis = &f + } + + } + } + return nil +} + +// NewTimeHttpHistogram returns a TimeHttpHistogram. 
+func NewTimeHttpHistogram() *TimeHttpHistogram { + r := &TimeHttpHistogram{} + + return r +} + +// false diff --git a/typedapi/types/timeofmonth.go b/typedapi/types/timeofmonth.go index 07d81cab12..8f31aa1128 100644 --- a/typedapi/types/timeofmonth.go +++ b/typedapi/types/timeofmonth.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TimeOfMonth type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Schedule.ts#L110-L113 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Schedule.ts#L111-L114 type TimeOfMonth struct { At []string `json:"at"` On []int `json:"on"` @@ -34,3 +34,13 @@ func NewTimeOfMonth() *TimeOfMonth { return r } + +// true + +type TimeOfMonthVariant interface { + TimeOfMonthCaster() *TimeOfMonth +} + +func (s *TimeOfMonth) TimeOfMonthCaster() *TimeOfMonth { + return s +} diff --git a/typedapi/types/timeofweek.go b/typedapi/types/timeofweek.go index b951ce0691..f2f0f2a06b 100644 --- a/typedapi/types/timeofweek.go +++ b/typedapi/types/timeofweek.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // TimeOfWeek type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Schedule.ts#L115-L118 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Schedule.ts#L116-L119 type TimeOfWeek struct { At []string `json:"at"` On []day.Day `json:"on"` @@ -38,3 +38,13 @@ func NewTimeOfWeek() *TimeOfWeek { return r } + +// true + +type TimeOfWeekVariant interface { + TimeOfWeekCaster() *TimeOfWeek +} + +func (s *TimeOfWeek) TimeOfWeekCaster() *TimeOfWeek { + return s +} diff --git a/typedapi/types/timeofyear.go b/typedapi/types/timeofyear.go index ea6b823010..051a305ebb 100644 --- a/typedapi/types/timeofyear.go +++ b/typedapi/types/timeofyear.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // TimeOfYear type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Schedule.ts#L120-L124 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Schedule.ts#L121-L125 type TimeOfYear struct { At []string `json:"at"` Int []month.Month `json:"int"` @@ -39,3 +39,13 @@ func NewTimeOfYear() *TimeOfYear { return r } + +// true + +type TimeOfYearVariant interface { + TimeOfYearCaster() *TimeOfYear +} + +func (s *TimeOfYear) TimeOfYearCaster() *TimeOfYear { + return s +} diff --git a/typedapi/types/multibucketaggregatebasevoid.go b/typedapi/types/timeseriesaggregate.go similarity index 69% rename from typedapi/types/multibucketaggregatebasevoid.go rename to typedapi/types/timeseriesaggregate.go index f46a024c08..3ec807e427 100644 --- a/typedapi/types/multibucketaggregatebasevoid.go +++ b/typedapi/types/timeseriesaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,15 +28,15 @@ import ( "io" ) -// MultiBucketAggregateBaseVoid type. +// TimeSeriesAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L327-L329 -type MultiBucketAggregateBaseVoid struct { - Buckets BucketsVoid `json:"buckets"` - Meta Metadata `json:"meta,omitempty"` +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L730-L731 +type TimeSeriesAggregate struct { + Buckets BucketsTimeSeriesBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } -func (s *MultiBucketAggregateBaseVoid) UnmarshalJSON(data []byte) error { +func (s *TimeSeriesAggregate) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) @@ -59,13 +59,13 @@ func (s *MultiBucketAggregateBaseVoid) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) switch rawMsg[0] { case '{': - o := make(map[string]any, 0) + o := make(map[string]TimeSeriesBucket, 0) if err := localDec.Decode(&o); err != nil { return fmt.Errorf("%s | %w", "Buckets", err) } s.Buckets = o case '[': - o := []any{} + o := []TimeSeriesBucket{} if err := localDec.Decode(&o); err != nil { return fmt.Errorf("%s | %w", "Buckets", err) } @@ -82,9 +82,11 @@ func (s *MultiBucketAggregateBaseVoid) UnmarshalJSON(data []byte) error { return nil } -// NewMultiBucketAggregateBaseVoid returns a MultiBucketAggregateBaseVoid. -func NewMultiBucketAggregateBaseVoid() *MultiBucketAggregateBaseVoid { - r := &MultiBucketAggregateBaseVoid{} +// NewTimeSeriesAggregate returns a TimeSeriesAggregate. +func NewTimeSeriesAggregate() *TimeSeriesAggregate { + r := &TimeSeriesAggregate{} return r } + +// false diff --git a/typedapi/types/timeseriesaggregation.go b/typedapi/types/timeseriesaggregation.go new file mode 100644 index 0000000000..a817428a6b --- /dev/null +++ b/typedapi/types/timeseriesaggregation.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TimeSeriesAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1033-L1046 +type TimeSeriesAggregation struct { + // Keyed Set to `true` to associate a unique string key with each bucket and returns + // the ranges as a hash rather than an array. + Keyed *bool `json:"keyed,omitempty"` + // Size The maximum number of results to return. 
+ Size *int `json:"size,omitempty"` +} + +func (s *TimeSeriesAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "keyed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Keyed", err) + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + +// NewTimeSeriesAggregation returns a TimeSeriesAggregation. +func NewTimeSeriesAggregation() *TimeSeriesAggregation { + r := &TimeSeriesAggregation{} + + return r +} + +// true + +type TimeSeriesAggregationVariant interface { + TimeSeriesAggregationCaster() *TimeSeriesAggregation +} + +func (s *TimeSeriesAggregation) TimeSeriesAggregationCaster() *TimeSeriesAggregation { + return s +} diff --git a/typedapi/types/timeseriesbucket.go b/typedapi/types/timeseriesbucket.go new file mode 100644 index 0000000000..adb135f366 --- /dev/null +++ b/typedapi/types/timeseriesbucket.go @@ -0,0 +1,644 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// TimeSeriesBucket type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L733-L735 +type TimeSeriesBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key map[string]FieldValue `json:"key"` +} + +func (s *TimeSeriesBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + if s.Key == nil { + s.Key = make(map[string]FieldValue, 0) + } + if err := dec.Decode(&s.Key); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + 
switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != 
nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := 
NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", 
"Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := 
dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s TimeSeriesBucket) MarshalJSON() ([]byte, error) { + type opt TimeSeriesBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value 
:= range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewTimeSeriesBucket returns a TimeSeriesBucket. +func NewTimeSeriesBucket() *TimeSeriesBucket { + r := &TimeSeriesBucket{ + Aggregations: make(map[string]Aggregate), + Key: make(map[string]FieldValue), + } + + return r +} + +// false diff --git a/typedapi/types/timesync.go b/typedapi/types/timesync.go index 7ea8a2cda9..03b57b3f5c 100644 --- a/typedapi/types/timesync.go +++ b/typedapi/types/timesync.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TimeSync type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/_types/Transform.ts#L177-L189 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/_types/Transform.ts#L177-L189 type TimeSync struct { // Delay The time delay between the current time and the latest input data time. Delay Duration `json:"delay,omitempty"` @@ -78,3 +78,13 @@ func NewTimeSync() *TimeSync { return r } + +// true + +type TimeSyncVariant interface { + TimeSyncCaster() *TimeSync +} + +func (s *TimeSync) TimeSyncCaster() *TimeSync { + return s +} diff --git a/typedapi/types/timingstats.go b/typedapi/types/timingstats.go index de069cd182..29d10df834 100644 --- a/typedapi/types/timingstats.go +++ b/typedapi/types/timingstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TimingStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L563-L568 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L564-L569 type TimingStats struct { // ElapsedTime Runtime of the analysis in milliseconds. ElapsedTime int64 `json:"elapsed_time"` @@ -74,3 +74,5 @@ func NewTimingStats() *TimingStats { return r } + +// false diff --git a/typedapi/types/tokencountproperty.go b/typedapi/types/tokencountproperty.go index 77c96c19b2..b36868a4f5 100644 --- a/typedapi/types/tokencountproperty.go +++ b/typedapi/types/tokencountproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // TokenCountProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/specialized.ts#L85-L92 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/specialized.ts#L94-L101 type TokenCountProperty struct { Analyzer *string `json:"analyzer,omitempty"` Boost *Float64 `json:"boost,omitempty"` @@ -45,12 +46,12 @@ type TokenCountProperty struct { IgnoreAbove *int `json:"ignore_above,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *Float64 `json:"null_value,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *Float64 `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { @@ -164,301 +165,313 @@ func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); 
err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if 
err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -537,318 +550,318 @@ func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := 
NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - 
return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -863,6 +876,11 @@ func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", 
"Type", err) @@ -889,8 +907,8 @@ func (s TokenCountProperty) MarshalJSON() ([]byte, error) { Meta: s.Meta, NullValue: s.NullValue, Properties: s.Properties, - Similarity: s.Similarity, Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, Type: s.Type, } @@ -902,10 +920,20 @@ func (s TokenCountProperty) MarshalJSON() ([]byte, error) { // NewTokenCountProperty returns a TokenCountProperty. func NewTokenCountProperty() *TokenCountProperty { r := &TokenCountProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type TokenCountPropertyVariant interface { + TokenCountPropertyCaster() *TokenCountProperty +} + +func (s *TokenCountProperty) TokenCountPropertyCaster() *TokenCountProperty { + return s +} diff --git a/typedapi/types/tokendetail.go b/typedapi/types/tokendetail.go index 5f02c49d37..4195313aa6 100644 --- a/typedapi/types/tokendetail.go +++ b/typedapi/types/tokendetail.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TokenDetail type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/analyze/types.ts#L71-L74 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/analyze/types.ts#L71-L74 type TokenDetail struct { Name string `json:"name"` Tokens []ExplainAnalyzeToken `json:"tokens"` @@ -80,3 +80,5 @@ func NewTokenDetail() *TokenDetail { return r } + +// false diff --git a/typedapi/types/tokenfilter.go b/typedapi/types/tokenfilter.go index 9d665ae8f5..028d4fcc36 100644 --- a/typedapi/types/tokenfilter.go +++ b/typedapi/types/tokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // TokenFilterDefinition // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L346-L348 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L345-L350 type TokenFilter any + +type TokenFilterVariant interface { + TokenFilterCaster() *TokenFilter +} diff --git a/typedapi/types/tokenfilterdefinition.go b/typedapi/types/tokenfilterdefinition.go index 1b60eecf2f..a1fa761c10 100644 --- a/typedapi/types/tokenfilterdefinition.go +++ b/typedapi/types/tokenfilterdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -70,5 +70,9 @@ package types // PhoneticTokenFilter // DictionaryDecompounderTokenFilter // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L350-L402 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L352-L404 type TokenFilterDefinition any + +type TokenFilterDefinitionVariant interface { + TokenFilterDefinitionCaster() *TokenFilterDefinition +} diff --git a/typedapi/types/tokenizationconfigcontainer.go b/typedapi/types/tokenizationconfigcontainer.go index fa39ebd78e..9cfa0c1835 100644 --- a/typedapi/types/tokenizationconfigcontainer.go +++ b/typedapi/types/tokenizationconfigcontainer.go @@ -16,25 +16,74 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // TokenizationConfigContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L110-L129 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L110-L131 type TokenizationConfigContainer struct { + AdditionalTokenizationConfigContainerProperty map[string]json.RawMessage `json:"-"` // Bert Indicates BERT tokenization and its options Bert *NlpBertTokenizationConfig `json:"bert,omitempty"` + // BertJa Indicates BERT Japanese tokenization and its options + BertJa *NlpBertTokenizationConfig `json:"bert_ja,omitempty"` // Mpnet Indicates MPNET tokenization and its options Mpnet *NlpBertTokenizationConfig `json:"mpnet,omitempty"` // Roberta Indicates RoBERTa tokenization and its options Roberta *NlpRobertaTokenizationConfig `json:"roberta,omitempty"` } +// MarshalJSON overrides marshalling for types with additional properties +func (s TokenizationConfigContainer) MarshalJSON() ([]byte, error) { + type opt TokenizationConfigContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalTokenizationConfigContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalTokenizationConfigContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewTokenizationConfigContainer returns a TokenizationConfigContainer. 
func NewTokenizationConfigContainer() *TokenizationConfigContainer { - r := &TokenizationConfigContainer{} + r := &TokenizationConfigContainer{ + AdditionalTokenizationConfigContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type TokenizationConfigContainerVariant interface { + TokenizationConfigContainerCaster() *TokenizationConfigContainer +} + +func (s *TokenizationConfigContainer) TokenizationConfigContainerCaster() *TokenizationConfigContainer { + return s +} diff --git a/typedapi/types/tokenizer.go b/typedapi/types/tokenizer.go index 87c3896fd6..4c01cd08e5 100644 --- a/typedapi/types/tokenizer.go +++ b/typedapi/types/tokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // string // TokenizerDefinition // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L120-L122 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L140-L145 type Tokenizer any + +type TokenizerVariant interface { + TokenizerCaster() *Tokenizer +} diff --git a/typedapi/types/tokenizerdefinition.go b/typedapi/types/tokenizerdefinition.go index 5f17962117..ab389fc62f 100644 --- a/typedapi/types/tokenizerdefinition.go +++ b/typedapi/types/tokenizerdefinition.go @@ -16,26 +16,34 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TokenizerDefinition holds the union for the following types: // // CharGroupTokenizer +// ClassicTokenizer // EdgeNGramTokenizer // KeywordTokenizer // LetterTokenizer // LowercaseTokenizer // NGramTokenizer -// NoriTokenizer // PathHierarchyTokenizer +// PatternTokenizer +// SimplePatternTokenizer +// SimplePatternSplitTokenizer // StandardTokenizer +// ThaiTokenizer // UaxEmailUrlTokenizer // WhitespaceTokenizer -// KuromojiTokenizer -// PatternTokenizer // IcuTokenizer +// KuromojiTokenizer +// NoriTokenizer // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L124-L142 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L147-L170 type TokenizerDefinition any + +type TokenizerDefinitionVariant interface { + TokenizerDefinitionCaster() *TokenizerDefinition +} diff --git a/typedapi/types/tokenpruningconfig.go b/typedapi/types/tokenpruningconfig.go index 47e54a1a6b..95eb22a63b 100644 --- a/typedapi/types/tokenpruningconfig.go +++ b/typedapi/types/tokenpruningconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TokenPruningConfig type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/TokenPruningConfig.ts#L22-L35 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/TokenPruningConfig.ts#L22-L35 type TokenPruningConfig struct { // OnlyScorePrunedTokens Whether to only score pruned tokens, vs only scoring kept tokens. OnlyScorePrunedTokens *bool `json:"only_score_pruned_tokens,omitempty"` @@ -116,3 +116,13 @@ func NewTokenPruningConfig() *TokenPruningConfig { return r } + +// true + +type TokenPruningConfigVariant interface { + TokenPruningConfigCaster() *TokenPruningConfig +} + +func (s *TokenPruningConfig) TokenPruningConfigCaster() *TokenPruningConfig { + return s +} diff --git a/typedapi/types/toolcall.go b/typedapi/types/toolcall.go new file mode 100644 index 0000000000..08af3adc1e --- /dev/null +++ b/typedapi/types/toolcall.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ToolCall type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/chat_completion_unified/UnifiedRequest.ts#L122-L138 +type ToolCall struct { + // Function The function that the model called. + Function ToolCallFunction `json:"function"` + // Id The identifier of the tool call. + Id string `json:"id"` + // Type The type of the tool call. + Type string `json:"type"` +} + +func (s *ToolCall) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "function": + if err := dec.Decode(&s.Function); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewToolCall returns a ToolCall. +func NewToolCall() *ToolCall { + r := &ToolCall{} + + return r +} + +// true + +type ToolCallVariant interface { + ToolCallCaster() *ToolCall +} + +func (s *ToolCall) ToolCallCaster() *ToolCall { + return s +} diff --git a/typedapi/types/toolcallfunction.go b/typedapi/types/toolcallfunction.go new file mode 100644 index 0000000000..782365b6a0 --- /dev/null +++ b/typedapi/types/toolcallfunction.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ToolCallFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/chat_completion_unified/UnifiedRequest.ts#L108-L120 +type ToolCallFunction struct { + // Arguments The arguments to call the function with in JSON format. + Arguments string `json:"arguments"` + // Name The name of the function to call. 
+ Name string `json:"name"` +} + +func (s *ToolCallFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "arguments": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Arguments", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Arguments = o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + } + } + return nil +} + +// NewToolCallFunction returns a ToolCallFunction. +func NewToolCallFunction() *ToolCallFunction { + r := &ToolCallFunction{} + + return r +} + +// true + +type ToolCallFunctionVariant interface { + ToolCallFunctionCaster() *ToolCallFunction +} + +func (s *ToolCallFunction) ToolCallFunctionCaster() *ToolCallFunction { + return s +} diff --git a/typedapi/types/topclassentry.go b/typedapi/types/topclassentry.go index 9b3ed46361..f9e074304b 100644 --- a/typedapi/types/topclassentry.go +++ b/typedapi/types/topclassentry.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TopClassEntry type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L440-L444 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L428-L432 type TopClassEntry struct { ClassName string `json:"class_name"` ClassProbability Float64 `json:"class_probability"` @@ -108,3 +108,5 @@ func NewTopClassEntry() *TopClassEntry { return r } + +// false diff --git a/typedapi/types/tophit.go b/typedapi/types/tophit.go index d6d17cf169..967270d469 100644 --- a/typedapi/types/tophit.go +++ b/typedapi/types/tophit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TopHit type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/text_structure/find_structure/types.ts#L35-L38 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/text_structure/_types/Structure.ts#L35-L38 type TopHit struct { Count int64 `json:"count"` Value json.RawMessage `json:"value,omitempty"` @@ -83,3 +83,5 @@ func NewTopHit() *TopHit { return r } + +// false diff --git a/typedapi/types/tophitsaggregate.go b/typedapi/types/tophitsaggregate.go index ee8e5fcfd2..0e449fda40 100644 --- a/typedapi/types/tophitsaggregate.go +++ b/typedapi/types/tophitsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TopHitsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L658-L661 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L747-L753 type TopHitsAggregate struct { Hits HitsMetadata `json:"hits"` Meta Metadata `json:"meta,omitempty"` @@ -72,3 +72,5 @@ func NewTopHitsAggregate() *TopHitsAggregate { return r } + +// false diff --git a/typedapi/types/tophitsaggregation.go b/typedapi/types/tophitsaggregation.go index bafc649ae0..a92a7b60fb 100644 --- a/typedapi/types/tophitsaggregation.go +++ b/typedapi/types/tophitsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TopHitsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L337-L397 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L346-L406 type TopHitsAggregation struct { // DocvalueFields Fields for which to return doc values. 
DocvalueFields []FieldAndFormat `json:"docvalue_fields,omitempty"` @@ -291,8 +291,18 @@ func (s *TopHitsAggregation) UnmarshalJSON(data []byte) error { // NewTopHitsAggregation returns a TopHitsAggregation. func NewTopHitsAggregation() *TopHitsAggregation { r := &TopHitsAggregation{ - ScriptFields: make(map[string]ScriptField, 0), + ScriptFields: make(map[string]ScriptField), } return r } + +// true + +type TopHitsAggregationVariant interface { + TopHitsAggregationCaster() *TopHitsAggregation +} + +func (s *TopHitsAggregation) TopHitsAggregationCaster() *TopHitsAggregation { + return s +} diff --git a/typedapi/types/topleftbottomrightgeobounds.go b/typedapi/types/topleftbottomrightgeobounds.go index 95a8347300..b8cfc5d70c 100644 --- a/typedapi/types/topleftbottomrightgeobounds.go +++ b/typedapi/types/topleftbottomrightgeobounds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TopLeftBottomRightGeoBounds type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Geo.ts#L161-L164 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Geo.ts#L161-L164 type TopLeftBottomRightGeoBounds struct { BottomRight GeoLocation `json:"bottom_right"` TopLeft GeoLocation `json:"top_left"` @@ -152,3 +152,13 @@ func NewTopLeftBottomRightGeoBounds() *TopLeftBottomRightGeoBounds { return r } + +// true + +type TopLeftBottomRightGeoBoundsVariant interface { + TopLeftBottomRightGeoBoundsCaster() *TopLeftBottomRightGeoBounds +} + +func (s *TopLeftBottomRightGeoBounds) TopLeftBottomRightGeoBoundsCaster() *TopLeftBottomRightGeoBounds { + return s +} diff --git a/typedapi/types/topmetrics.go b/typedapi/types/topmetrics.go index 12e1b6eb1e..24663880f8 100644 --- a/typedapi/types/topmetrics.go +++ b/typedapi/types/topmetrics.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TopMetrics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L736-L740 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L832-L836 type TopMetrics struct { Metrics map[string]FieldValue `json:"metrics"` Sort []FieldValue `json:"sort"` @@ -31,8 +31,10 @@ type TopMetrics struct { // NewTopMetrics returns a TopMetrics. 
func NewTopMetrics() *TopMetrics { r := &TopMetrics{ - Metrics: make(map[string]FieldValue, 0), + Metrics: make(map[string]FieldValue), } return r } + +// false diff --git a/typedapi/types/topmetricsaggregate.go b/typedapi/types/topmetricsaggregate.go index ff33953a0c..94feff7e23 100644 --- a/typedapi/types/topmetricsaggregate.go +++ b/typedapi/types/topmetricsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TopMetricsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L731-L734 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L827-L830 type TopMetricsAggregate struct { Meta Metadata `json:"meta,omitempty"` Top []TopMetrics `json:"top"` @@ -72,3 +72,5 @@ func NewTopMetricsAggregate() *TopMetricsAggregate { return r } + +// false diff --git a/typedapi/types/topmetricsaggregation.go b/typedapi/types/topmetricsaggregation.go index e92996698b..3d519850ab 100644 --- a/typedapi/types/topmetricsaggregation.go +++ b/typedapi/types/topmetricsaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TopMetricsAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L399-L413 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L408-L425 type TopMetricsAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -136,3 +136,13 @@ func NewTopMetricsAggregation() *TopMetricsAggregation { return r } + +// true + +type TopMetricsAggregationVariant interface { + TopMetricsAggregationCaster() *TopMetricsAggregation +} + +func (s *TopMetricsAggregation) TopMetricsAggregationCaster() *TopMetricsAggregation { + return s +} diff --git a/typedapi/types/topmetricsvalue.go b/typedapi/types/topmetricsvalue.go index 3a37949e98..0d0bfdb7cf 100644 --- a/typedapi/types/topmetricsvalue.go +++ b/typedapi/types/topmetricsvalue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TopMetricsValue type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L415-L420 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L427-L432 type TopMetricsValue struct { // Field A field to return as a metric. 
Field string `json:"field"` @@ -67,3 +67,13 @@ func NewTopMetricsValue() *TopMetricsValue { return r } + +// true + +type TopMetricsValueVariant interface { + TopMetricsValueCaster() *TopMetricsValue +} + +func (s *TopMetricsValue) TopMetricsValueCaster() *TopMetricsValue { + return s +} diff --git a/typedapi/types/toprightbottomleftgeobounds.go b/typedapi/types/toprightbottomleftgeobounds.go index 75f2043e80..2ed9b5f76c 100644 --- a/typedapi/types/toprightbottomleftgeobounds.go +++ b/typedapi/types/toprightbottomleftgeobounds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TopRightBottomLeftGeoBounds type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Geo.ts#L166-L169 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Geo.ts#L166-L169 type TopRightBottomLeftGeoBounds struct { BottomLeft GeoLocation `json:"bottom_left"` TopRight GeoLocation `json:"top_right"` @@ -152,3 +152,13 @@ func NewTopRightBottomLeftGeoBounds() *TopRightBottomLeftGeoBounds { return r } + +// true + +type TopRightBottomLeftGeoBoundsVariant interface { + TopRightBottomLeftGeoBoundsCaster() *TopRightBottomLeftGeoBounds +} + +func (s *TopRightBottomLeftGeoBounds) TopRightBottomLeftGeoBoundsCaster() *TopRightBottomLeftGeoBounds { + return s +} diff --git a/typedapi/types/totalfeatureimportance.go b/typedapi/types/totalfeatureimportance.go index deeb94cae5..915d637d4f 100644 --- a/typedapi/types/totalfeatureimportance.go +++ b/typedapi/types/totalfeatureimportance.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TotalFeatureImportance type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L233-L240 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L289-L296 type TotalFeatureImportance struct { // Classes If the trained model is a classification model, feature importance statistics // are gathered per target class value. @@ -83,3 +83,5 @@ func NewTotalFeatureImportance() *TotalFeatureImportance { return r } + +// false diff --git a/typedapi/types/totalfeatureimportanceclass.go b/typedapi/types/totalfeatureimportanceclass.go index 60051dbeea..5094d835d1 100644 --- a/typedapi/types/totalfeatureimportanceclass.go +++ b/typedapi/types/totalfeatureimportanceclass.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TotalFeatureImportanceClass type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L242-L247 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L298-L303 type TotalFeatureImportanceClass struct { // ClassName The target class value. 
Could be a string, boolean, or number. ClassName string `json:"class_name"` @@ -75,3 +75,5 @@ func NewTotalFeatureImportanceClass() *TotalFeatureImportanceClass { return r } + +// false diff --git a/typedapi/types/totalfeatureimportancestatistics.go b/typedapi/types/totalfeatureimportancestatistics.go index 870e972cc2..50ecea41c3 100644 --- a/typedapi/types/totalfeatureimportancestatistics.go +++ b/typedapi/types/totalfeatureimportancestatistics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TotalFeatureImportanceStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L249-L256 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L305-L312 type TotalFeatureImportanceStatistics struct { // Max The maximum importance value across all the training data for this feature. Max int `json:"max"` @@ -117,3 +117,5 @@ func NewTotalFeatureImportanceStatistics() *TotalFeatureImportanceStatistics { return r } + +// false diff --git a/typedapi/types/totaluserprofiles.go b/typedapi/types/totaluserprofiles.go index 105cb2fb80..b072620b16 100644 --- a/typedapi/types/totaluserprofiles.go +++ b/typedapi/types/totaluserprofiles.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TotalUserProfiles type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/suggest_user_profiles/Response.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/suggest_user_profiles/Response.ts#L24-L27 type TotalUserProfiles struct { Relation string `json:"relation"` Value int64 `json:"value"` @@ -83,3 +83,5 @@ func NewTotalUserProfiles() *TotalUserProfiles { return r } + +// false diff --git a/typedapi/types/trackhits.go b/typedapi/types/trackhits.go index 039e464733..bd0bbf760f 100644 --- a/typedapi/types/trackhits.go +++ b/typedapi/types/trackhits.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,9 @@ package types // bool // int // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/search/_types/hits.ts#L143-L151 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/search/_types/hits.ts#L144-L152 type TrackHits any + +type TrackHitsVariant interface { + TrackHitsCaster() *TrackHits +} diff --git a/typedapi/types/trainedmodel.go b/typedapi/types/trainedmodel.go index 1822651517..eb9a5fffe5 100644 --- a/typedapi/types/trainedmodel.go +++ b/typedapi/types/trainedmodel.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TrainedModel type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/types.ts#L60-L72 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/types.ts#L60-L72 type TrainedModel struct { // Ensemble The definition for an ensemble model Ensemble *Ensemble `json:"ensemble,omitempty"` @@ -42,3 +42,13 @@ func NewTrainedModel() *TrainedModel { return r } + +// true + +type TrainedModelVariant interface { + TrainedModelCaster() *TrainedModel +} + +func (s *TrainedModel) TrainedModelCaster() *TrainedModel { + return s +} diff --git a/typedapi/types/trainedmodelassignment.go b/typedapi/types/trainedmodelassignment.go index 64fdee1507..8c471fbcb3 100644 --- a/typedapi/types/trainedmodelassignment.go +++ b/typedapi/types/trainedmodelassignment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,11 +33,13 @@ import ( // TrainedModelAssignment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L403-L418 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L459-L476 type TrainedModelAssignment struct { + AdaptiveAllocations *AdaptiveAllocationsSettings `json:"adaptive_allocations,omitempty"` // AssignmentState The overall assignment state. AssignmentState deploymentassignmentstate.DeploymentAssignmentState `json:"assignment_state"` MaxAssignedAllocations *int `json:"max_assigned_allocations,omitempty"` + Reason *string `json:"reason,omitempty"` // RoutingTable The allocation state for each node. RoutingTable map[string]TrainedModelAssignmentRoutingTable `json:"routing_table"` // StartTime The timestamp when the deployment started. @@ -60,6 +62,11 @@ func (s *TrainedModelAssignment) UnmarshalJSON(data []byte) error { switch t { + case "adaptive_allocations": + if err := dec.Decode(&s.AdaptiveAllocations); err != nil { + return fmt.Errorf("%s | %w", "AdaptiveAllocations", err) + } + case "assignment_state": if err := dec.Decode(&s.AssignmentState); err != nil { return fmt.Errorf("%s | %w", "AssignmentState", err) @@ -81,6 +88,18 @@ func (s *TrainedModelAssignment) UnmarshalJSON(data []byte) error { s.MaxAssignedAllocations = &f } + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + case "routing_table": if s.RoutingTable == nil { s.RoutingTable = make(map[string]TrainedModelAssignmentRoutingTable, 0) @@ -107,8 +126,10 @@ func (s *TrainedModelAssignment) UnmarshalJSON(data []byte) error { // NewTrainedModelAssignment returns a TrainedModelAssignment. 
func NewTrainedModelAssignment() *TrainedModelAssignment { r := &TrainedModelAssignment{ - RoutingTable: make(map[string]TrainedModelAssignmentRoutingTable, 0), + RoutingTable: make(map[string]TrainedModelAssignmentRoutingTable), } return r } + +// false diff --git a/typedapi/types/trainedmodelassignmentroutingtable.go b/typedapi/types/trainedmodelassignmentroutingtable.go index 67d088ce5b..42e4bb45ee 100644 --- a/typedapi/types/trainedmodelassignmentroutingtable.go +++ b/typedapi/types/trainedmodelassignmentroutingtable.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,13 +33,13 @@ import ( // TrainedModelAssignmentRoutingTable type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L374-L392 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L430-L448 type TrainedModelAssignmentRoutingTable struct { // CurrentAllocations Current number of allocations. CurrentAllocations int `json:"current_allocations"` // Reason The reason for the current state. It is usually populated only when the // `routing_state` is `failed`. - Reason string `json:"reason"` + Reason *string `json:"reason,omitempty"` // RoutingState The current routing state. RoutingState routingstate.RoutingState `json:"routing_state"` // TargetAllocations Target number of allocations. 
@@ -87,7 +87,7 @@ func (s *TrainedModelAssignmentRoutingTable) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Reason = o + s.Reason = &o case "routing_state": if err := dec.Decode(&s.RoutingState); err != nil { @@ -121,3 +121,5 @@ func NewTrainedModelAssignmentRoutingTable() *TrainedModelAssignmentRoutingTable return r } + +// false diff --git a/typedapi/types/trainedmodelassignmenttaskparameters.go b/typedapi/types/trainedmodelassignmenttaskparameters.go index 4f0b57e33c..32db09bc01 100644 --- a/typedapi/types/trainedmodelassignmenttaskparameters.go +++ b/typedapi/types/trainedmodelassignmenttaskparameters.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,19 +33,21 @@ import ( // TrainedModelAssignmentTaskParameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L316-L349 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L369-L405 type TrainedModelAssignmentTaskParameters struct { // CacheSize The size of the trained model cache. - CacheSize ByteSize `json:"cache_size"` + CacheSize ByteSize `json:"cache_size,omitempty"` // DeploymentId The unique identifier for the trained model deployment. DeploymentId string `json:"deployment_id"` // ModelBytes The size of the trained model in bytes. - ModelBytes int `json:"model_bytes"` + ModelBytes ByteSize `json:"model_bytes"` // ModelId The unique identifier for the trained model. ModelId string `json:"model_id"` // NumberOfAllocations The total number of allocations this model is assigned across ML nodes. 
- NumberOfAllocations int `json:"number_of_allocations"` - Priority trainingpriority.TrainingPriority `json:"priority"` + NumberOfAllocations int `json:"number_of_allocations"` + PerAllocationMemoryBytes ByteSize `json:"per_allocation_memory_bytes"` + PerDeploymentMemoryBytes ByteSize `json:"per_deployment_memory_bytes"` + Priority trainingpriority.TrainingPriority `json:"priority"` // QueueCapacity Number of inference requests are allowed in the queue at a time. QueueCapacity int `json:"queue_capacity"` // ThreadsPerAllocation Number of threads per allocation. @@ -78,19 +80,8 @@ func (s *TrainedModelAssignmentTaskParameters) UnmarshalJSON(data []byte) error } case "model_bytes": - - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("%s | %w", "ModelBytes", err) - } - s.ModelBytes = value - case float64: - f := int(v) - s.ModelBytes = f + if err := dec.Decode(&s.ModelBytes); err != nil { + return fmt.Errorf("%s | %w", "ModelBytes", err) } case "model_id": @@ -114,6 +105,16 @@ func (s *TrainedModelAssignmentTaskParameters) UnmarshalJSON(data []byte) error s.NumberOfAllocations = f } + case "per_allocation_memory_bytes": + if err := dec.Decode(&s.PerAllocationMemoryBytes); err != nil { + return fmt.Errorf("%s | %w", "PerAllocationMemoryBytes", err) + } + + case "per_deployment_memory_bytes": + if err := dec.Decode(&s.PerDeploymentMemoryBytes); err != nil { + return fmt.Errorf("%s | %w", "PerDeploymentMemoryBytes", err) + } + case "priority": if err := dec.Decode(&s.Priority); err != nil { return fmt.Errorf("%s | %w", "Priority", err) @@ -162,3 +163,5 @@ func NewTrainedModelAssignmentTaskParameters() *TrainedModelAssignmentTaskParame return r } + +// false diff --git a/typedapi/types/trainedmodelconfig.go b/typedapi/types/trainedmodelconfig.go index 1b77a394c4..787e1800ab 100644 --- a/typedapi/types/trainedmodelconfig.go +++ b/typedapi/types/trainedmodelconfig.go @@ -16,7 
+16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // TrainedModelConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L165-L200 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L203-L239 type TrainedModelConfig struct { CompressedDefinition *string `json:"compressed_definition,omitempty"` // CreateTime The time when the trained model was created. @@ -64,8 +64,9 @@ type TrainedModelConfig struct { // created by data frame analytics contain analysis_config and input objects. Metadata *TrainedModelConfigMetadata `json:"metadata,omitempty"` // ModelId Identifier for the trained model. 
- ModelId string `json:"model_id"` - ModelSizeBytes ByteSize `json:"model_size_bytes,omitempty"` + ModelId string `json:"model_id"` + ModelPackage *ModelPackageConfig `json:"model_package,omitempty"` + ModelSizeBytes ByteSize `json:"model_size_bytes,omitempty"` // ModelType The model type ModelType *trainedmodeltype.TrainedModelType `json:"model_type,omitempty"` PrefixStrings *TrainedModelPrefixStrings `json:"prefix_strings,omitempty"` @@ -223,6 +224,11 @@ func (s *TrainedModelConfig) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "ModelId", err) } + case "model_package": + if err := dec.Decode(&s.ModelPackage); err != nil { + return fmt.Errorf("%s | %w", "ModelPackage", err) + } + case "model_size_bytes": if err := dec.Decode(&s.ModelSizeBytes); err != nil { return fmt.Errorf("%s | %w", "ModelSizeBytes", err) @@ -256,8 +262,10 @@ func (s *TrainedModelConfig) UnmarshalJSON(data []byte) error { // NewTrainedModelConfig returns a TrainedModelConfig. func NewTrainedModelConfig() *TrainedModelConfig { r := &TrainedModelConfig{ - DefaultFieldMap: make(map[string]string, 0), + DefaultFieldMap: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/trainedmodelconfiginput.go b/typedapi/types/trainedmodelconfiginput.go index 30f1e67a65..b7eacbc5ea 100644 --- a/typedapi/types/trainedmodelconfiginput.go +++ b/typedapi/types/trainedmodelconfiginput.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TrainedModelConfigInput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L202-L205 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L241-L244 type TrainedModelConfigInput struct { // FieldNames An array of input field names for the model. FieldNames []string `json:"field_names"` @@ -34,3 +34,5 @@ func NewTrainedModelConfigInput() *TrainedModelConfigInput { return r } + +// false diff --git a/typedapi/types/trainedmodelconfigmetadata.go b/typedapi/types/trainedmodelconfigmetadata.go index 137e5b548b..51bf29d863 100644 --- a/typedapi/types/trainedmodelconfigmetadata.go +++ b/typedapi/types/trainedmodelconfigmetadata.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TrainedModelConfigMetadata type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L207-L215 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L246-L254 type TrainedModelConfigMetadata struct { // FeatureImportanceBaseline An object that contains the baseline for feature importance values. For // regression analysis, it is a single value. For classification analysis, there @@ -42,8 +42,10 @@ type TrainedModelConfigMetadata struct { // NewTrainedModelConfigMetadata returns a TrainedModelConfigMetadata. 
func NewTrainedModelConfigMetadata() *TrainedModelConfigMetadata { r := &TrainedModelConfigMetadata{ - FeatureImportanceBaseline: make(map[string]string, 0), + FeatureImportanceBaseline: make(map[string]string), } return r } + +// false diff --git a/typedapi/types/trainedmodeldeploymentallocationstatus.go b/typedapi/types/trainedmodeldeploymentallocationstatus.go index 17e205e55f..17a74db093 100644 --- a/typedapi/types/trainedmodeldeploymentallocationstatus.go +++ b/typedapi/types/trainedmodeldeploymentallocationstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // TrainedModelDeploymentAllocationStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L394-L401 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L450-L457 type TrainedModelDeploymentAllocationStatus struct { // AllocationCount The current number of nodes where the model is allocated. AllocationCount int `json:"allocation_count"` @@ -106,3 +106,5 @@ func NewTrainedModelDeploymentAllocationStatus() *TrainedModelDeploymentAllocati return r } + +// false diff --git a/typedapi/types/trainedmodeldeploymentnodesstats.go b/typedapi/types/trainedmodeldeploymentnodesstats.go index 27a8876e5e..28353147d5 100644 --- a/typedapi/types/trainedmodeldeploymentnodesstats.go +++ b/typedapi/types/trainedmodeldeploymentnodesstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,34 +31,42 @@ import ( // TrainedModelDeploymentNodesStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L133-L163 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L156-L201 type TrainedModelDeploymentNodesStats struct { // AverageInferenceTimeMs The average time for each inference call to complete on this node. - AverageInferenceTimeMs Float64 `json:"average_inference_time_ms"` + AverageInferenceTimeMs Float64 `json:"average_inference_time_ms,omitempty"` + // AverageInferenceTimeMsExcludingCacheHits The average time for each inference call to complete on this node, excluding + // cache + AverageInferenceTimeMsExcludingCacheHits Float64 `json:"average_inference_time_ms_excluding_cache_hits,omitempty"` + AverageInferenceTimeMsLastMinute Float64 `json:"average_inference_time_ms_last_minute,omitempty"` // ErrorCount The number of errors when evaluating the trained model. - ErrorCount int `json:"error_count"` + ErrorCount *int `json:"error_count,omitempty"` + InferenceCacheHitCount *int64 `json:"inference_cache_hit_count,omitempty"` + InferenceCacheHitCountLastMinute *int64 `json:"inference_cache_hit_count_last_minute,omitempty"` // InferenceCount The total number of inference calls made against this node for this model. - InferenceCount int `json:"inference_count"` + InferenceCount *int64 `json:"inference_count,omitempty"` // LastAccess The epoch time stamp of the last inference call for the model on this node. 
- LastAccess int64 `json:"last_access"` + LastAccess *int64 `json:"last_access,omitempty"` // Node Information pertaining to the node. - Node DiscoveryNode `json:"node"` + Node DiscoveryNode `json:"node,omitempty"` // NumberOfAllocations The number of allocations assigned to this node. - NumberOfAllocations int `json:"number_of_allocations"` + NumberOfAllocations *int `json:"number_of_allocations,omitempty"` // NumberOfPendingRequests The number of inference requests queued to be processed. - NumberOfPendingRequests int `json:"number_of_pending_requests"` + NumberOfPendingRequests *int `json:"number_of_pending_requests,omitempty"` + PeakThroughputPerMinute int64 `json:"peak_throughput_per_minute"` // RejectionExecutionCount The number of inference requests that were not processed because the queue // was full. - RejectionExecutionCount int `json:"rejection_execution_count"` + RejectionExecutionCount *int `json:"rejection_execution_count,omitempty"` // RoutingState The current routing state and reason for the current routing state for this // allocation. RoutingState TrainedModelAssignmentRoutingTable `json:"routing_state"` // StartTime The epoch timestamp when the allocation started. - StartTime int64 `json:"start_time"` + StartTime *int64 `json:"start_time,omitempty"` // ThreadsPerAllocation The number of threads used by each allocation during inference. - ThreadsPerAllocation int `json:"threads_per_allocation"` + ThreadsPerAllocation *int `json:"threads_per_allocation,omitempty"` + ThroughputLastMinute int `json:"throughput_last_minute"` // TimeoutCount The number of inference requests that timed out before being processed. 
- TimeoutCount int `json:"timeout_count"` + TimeoutCount *int `json:"timeout_count,omitempty"` } func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { @@ -81,6 +89,16 @@ func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "AverageInferenceTimeMs", err) } + case "average_inference_time_ms_excluding_cache_hits": + if err := dec.Decode(&s.AverageInferenceTimeMsExcludingCacheHits); err != nil { + return fmt.Errorf("%s | %w", "AverageInferenceTimeMsExcludingCacheHits", err) + } + + case "average_inference_time_ms_last_minute": + if err := dec.Decode(&s.AverageInferenceTimeMsLastMinute); err != nil { + return fmt.Errorf("%s | %w", "AverageInferenceTimeMsLastMinute", err) + } + case "error_count": var tmp any @@ -91,41 +109,60 @@ func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "ErrorCount", err) } - s.ErrorCount = value + s.ErrorCount = &value case float64: f := int(v) - s.ErrorCount = f + s.ErrorCount = &f } - case "inference_count": + case "inference_cache_hit_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "InferenceCacheHitCount", err) + } + s.InferenceCacheHitCount = &value + case float64: + f := int64(v) + s.InferenceCacheHitCount = &f + } + case "inference_cache_hit_count_last_minute": var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: - value, err := strconv.Atoi(v) + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - return fmt.Errorf("%s | %w", "InferenceCount", err) + return fmt.Errorf("%s | %w", "InferenceCacheHitCountLastMinute", err) } - s.InferenceCount = value + s.InferenceCacheHitCountLastMinute = &value case float64: - f := int(v) - s.InferenceCount = f + f := int64(v) + s.InferenceCacheHitCountLastMinute = &f } - case "last_access": + case 
"inference_count": var tmp any dec.Decode(&tmp) switch v := tmp.(type) { case string: value, err := strconv.ParseInt(v, 10, 64) if err != nil { - return fmt.Errorf("%s | %w", "LastAccess", err) + return fmt.Errorf("%s | %w", "InferenceCount", err) } - s.LastAccess = value + s.InferenceCount = &value case float64: f := int64(v) - s.LastAccess = f + s.InferenceCount = &f + } + + case "last_access": + if err := dec.Decode(&s.LastAccess); err != nil { + return fmt.Errorf("%s | %w", "LastAccess", err) } case "node": @@ -143,10 +180,10 @@ func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "NumberOfAllocations", err) } - s.NumberOfAllocations = value + s.NumberOfAllocations = &value case float64: f := int(v) - s.NumberOfAllocations = f + s.NumberOfAllocations = &f } case "number_of_pending_requests": @@ -159,10 +196,25 @@ func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "NumberOfPendingRequests", err) } - s.NumberOfPendingRequests = value + s.NumberOfPendingRequests = &value case float64: f := int(v) - s.NumberOfPendingRequests = f + s.NumberOfPendingRequests = &f + } + + case "peak_throughput_per_minute": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PeakThroughputPerMinute", err) + } + s.PeakThroughputPerMinute = value + case float64: + f := int64(v) + s.PeakThroughputPerMinute = f } case "rejection_execution_count": @@ -175,10 +227,10 @@ func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "RejectionExecutionCount", err) } - s.RejectionExecutionCount = value + s.RejectionExecutionCount = &value case float64: f := int(v) - s.RejectionExecutionCount = f + s.RejectionExecutionCount = &f } case "routing_state": @@ -201,10 +253,26 @@ func (s 
*TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "ThreadsPerAllocation", err) } - s.ThreadsPerAllocation = value + s.ThreadsPerAllocation = &value case float64: f := int(v) - s.ThreadsPerAllocation = f + s.ThreadsPerAllocation = &f + } + + case "throughput_last_minute": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ThroughputLastMinute", err) + } + s.ThroughputLastMinute = value + case float64: + f := int(v) + s.ThroughputLastMinute = f } case "timeout_count": @@ -217,10 +285,10 @@ func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "TimeoutCount", err) } - s.TimeoutCount = value + s.TimeoutCount = &value case float64: f := int(v) - s.TimeoutCount = f + s.TimeoutCount = &f } } @@ -234,3 +302,5 @@ func NewTrainedModelDeploymentNodesStats() *TrainedModelDeploymentNodesStats { return r } + +// false diff --git a/typedapi/types/trainedmodeldeploymentstats.go b/typedapi/types/trainedmodeldeploymentstats.go index 0687959ee3..d6fde49f7c 100644 --- a/typedapi/types/trainedmodeldeploymentstats.go +++ b/typedapi/types/trainedmodeldeploymentstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,48 +28,52 @@ import ( "io" "strconv" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentstate" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentassignmentstate" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainingpriority" ) // TrainedModelDeploymentStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L62-L102 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L62-L107 type TrainedModelDeploymentStats struct { + AdaptiveAllocations *AdaptiveAllocationsSettings `json:"adaptive_allocations,omitempty"` // AllocationStatus The detailed allocation status for the deployment. - AllocationStatus TrainedModelDeploymentAllocationStatus `json:"allocation_status"` - CacheSize ByteSize `json:"cache_size,omitempty"` + AllocationStatus *TrainedModelDeploymentAllocationStatus `json:"allocation_status,omitempty"` + CacheSize ByteSize `json:"cache_size,omitempty"` // DeploymentId The unique identifier for the trained model deployment. DeploymentId string `json:"deployment_id"` // ErrorCount The sum of `error_count` for all nodes in the deployment. - ErrorCount int `json:"error_count"` + ErrorCount *int `json:"error_count,omitempty"` // InferenceCount The sum of `inference_count` for all nodes in the deployment. - InferenceCount int `json:"inference_count"` + InferenceCount *int `json:"inference_count,omitempty"` // ModelId The unique identifier for the trained model. ModelId string `json:"model_id"` // Nodes The deployment stats for each node that currently has the model allocated. // In serverless, stats are reported for a single unnamed virtual node. Nodes []TrainedModelDeploymentNodesStats `json:"nodes"` // NumberOfAllocations The number of allocations requested. - NumberOfAllocations int `json:"number_of_allocations"` + NumberOfAllocations *int `json:"number_of_allocations,omitempty"` + PeakThroughputPerMinute int64 `json:"peak_throughput_per_minute"` + Priority trainingpriority.TrainingPriority `json:"priority"` // QueueCapacity The number of inference requests that can be queued before new requests are // rejected. 
- QueueCapacity int `json:"queue_capacity"` + QueueCapacity *int `json:"queue_capacity,omitempty"` // Reason The reason for the current deployment state. Usually only populated when // the model is not deployed to a node. - Reason string `json:"reason"` + Reason *string `json:"reason,omitempty"` // RejectedExecutionCount The sum of `rejected_execution_count` for all nodes in the deployment. // Individual nodes reject an inference request if the inference queue is full. // The queue size is controlled by the `queue_capacity` setting in the start // trained model deployment API. - RejectedExecutionCount int `json:"rejected_execution_count"` + RejectedExecutionCount *int `json:"rejected_execution_count,omitempty"` // StartTime The epoch timestamp when the deployment started. StartTime int64 `json:"start_time"` // State The overall state of the deployment. - State deploymentstate.DeploymentState `json:"state"` + State *deploymentassignmentstate.DeploymentAssignmentState `json:"state,omitempty"` // ThreadsPerAllocation The number of threads used be each allocation during inference. - ThreadsPerAllocation int `json:"threads_per_allocation"` + ThreadsPerAllocation *int `json:"threads_per_allocation,omitempty"` // TimeoutCount The sum of `timeout_count` for all nodes in the deployment. 
- TimeoutCount int `json:"timeout_count"` + TimeoutCount *int `json:"timeout_count,omitempty"` } func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { @@ -87,6 +91,11 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { switch t { + case "adaptive_allocations": + if err := dec.Decode(&s.AdaptiveAllocations); err != nil { + return fmt.Errorf("%s | %w", "AdaptiveAllocations", err) + } + case "allocation_status": if err := dec.Decode(&s.AllocationStatus); err != nil { return fmt.Errorf("%s | %w", "AllocationStatus", err) @@ -112,10 +121,10 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "ErrorCount", err) } - s.ErrorCount = value + s.ErrorCount = &value case float64: f := int(v) - s.ErrorCount = f + s.ErrorCount = &f } case "inference_count": @@ -128,10 +137,10 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "InferenceCount", err) } - s.InferenceCount = value + s.InferenceCount = &value case float64: f := int(v) - s.InferenceCount = f + s.InferenceCount = &f } case "model_id": @@ -154,10 +163,30 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "NumberOfAllocations", err) } - s.NumberOfAllocations = value + s.NumberOfAllocations = &value case float64: f := int(v) - s.NumberOfAllocations = f + s.NumberOfAllocations = &f + } + + case "peak_throughput_per_minute": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PeakThroughputPerMinute", err) + } + s.PeakThroughputPerMinute = value + case float64: + f := int64(v) + s.PeakThroughputPerMinute = f + } + + case "priority": + if err := dec.Decode(&s.Priority); err != nil { + return fmt.Errorf("%s | %w", "Priority", err) } case "queue_capacity": @@ 
-170,10 +199,10 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "QueueCapacity", err) } - s.QueueCapacity = value + s.QueueCapacity = &value case float64: f := int(v) - s.QueueCapacity = f + s.QueueCapacity = &f } case "reason": @@ -186,7 +215,7 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { o = string(tmp[:]) } - s.Reason = o + s.Reason = &o case "rejected_execution_count": @@ -198,10 +227,10 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "RejectedExecutionCount", err) } - s.RejectedExecutionCount = value + s.RejectedExecutionCount = &value case float64: f := int(v) - s.RejectedExecutionCount = f + s.RejectedExecutionCount = &f } case "start_time": @@ -224,10 +253,10 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "ThreadsPerAllocation", err) } - s.ThreadsPerAllocation = value + s.ThreadsPerAllocation = &value case float64: f := int(v) - s.ThreadsPerAllocation = f + s.ThreadsPerAllocation = &f } case "timeout_count": @@ -240,10 +269,10 @@ func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("%s | %w", "TimeoutCount", err) } - s.TimeoutCount = value + s.TimeoutCount = &value case float64: f := int(v) - s.TimeoutCount = f + s.TimeoutCount = &f } } @@ -257,3 +286,5 @@ func NewTrainedModelDeploymentStats() *TrainedModelDeploymentStats { return r } + +// false diff --git a/typedapi/types/trainedmodelentities.go b/typedapi/types/trainedmodelentities.go index f5efcbe46b..8b3d06e616 100644 --- a/typedapi/types/trainedmodelentities.go +++ b/typedapi/types/trainedmodelentities.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TrainedModelEntities type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L433-L439 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L421-L427 type TrainedModelEntities struct { ClassName string `json:"class_name"` ClassProbability Float64 `json:"class_probability"` @@ -138,3 +138,5 @@ func NewTrainedModelEntities() *TrainedModelEntities { return r } + +// false diff --git a/typedapi/types/trainedmodelinferenceclassimportance.go b/typedapi/types/trainedmodelinferenceclassimportance.go index 7ee52ec4cc..eaf993ed87 100644 --- a/typedapi/types/trainedmodelinferenceclassimportance.go +++ b/typedapi/types/trainedmodelinferenceclassimportance.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TrainedModelInferenceClassImportance type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L446-L449 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L434-L437 type TrainedModelInferenceClassImportance struct { ClassName string `json:"class_name"` Importance Float64 `json:"importance"` @@ -91,3 +91,5 @@ func NewTrainedModelInferenceClassImportance() *TrainedModelInferenceClassImport return r } + +// false diff --git a/typedapi/types/trainedmodelinferencefeatureimportance.go b/typedapi/types/trainedmodelinferencefeatureimportance.go index 920c65d156..5030446486 100644 --- a/typedapi/types/trainedmodelinferencefeatureimportance.go +++ b/typedapi/types/trainedmodelinferencefeatureimportance.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TrainedModelInferenceFeatureImportance type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L451-L455 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L439-L443 type TrainedModelInferenceFeatureImportance struct { Classes []TrainedModelInferenceClassImportance `json:"classes,omitempty"` FeatureName string `json:"feature_name"` @@ -97,3 +97,5 @@ func NewTrainedModelInferenceFeatureImportance() *TrainedModelInferenceFeatureIm return r } + +// false diff --git a/typedapi/types/trainedmodelinferencestats.go b/typedapi/types/trainedmodelinferencestats.go index c5729d8eea..d074e76995 100644 --- a/typedapi/types/trainedmodelinferencestats.go +++ b/typedapi/types/trainedmodelinferencestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TrainedModelInferenceStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L104-L124 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L127-L147 type TrainedModelInferenceStats struct { // CacheMissCount The number of times the model was loaded for inference and was not retrieved // from the cache. 
@@ -147,3 +147,5 @@ func NewTrainedModelInferenceStats() *TrainedModelInferenceStats { return r } + +// false diff --git a/typedapi/types/trainedmodellocation.go b/typedapi/types/trainedmodellocation.go index e7e03b5119..7007e03d92 100644 --- a/typedapi/types/trainedmodellocation.go +++ b/typedapi/types/trainedmodellocation.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TrainedModelLocation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L420-L422 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L478-L480 type TrainedModelLocation struct { Index TrainedModelLocationIndex `json:"index"` } @@ -33,3 +33,5 @@ func NewTrainedModelLocation() *TrainedModelLocation { return r } + +// false diff --git a/typedapi/types/trainedmodellocationindex.go b/typedapi/types/trainedmodellocationindex.go index a7082978cb..361d0b263c 100644 --- a/typedapi/types/trainedmodellocationindex.go +++ b/typedapi/types/trainedmodellocationindex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TrainedModelLocationIndex type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L424-L426 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L482-L484 type TrainedModelLocationIndex struct { Name string `json:"name"` } @@ -66,3 +66,5 @@ func NewTrainedModelLocationIndex() *TrainedModelLocationIndex { return r } + +// false diff --git a/typedapi/types/trainedmodelprefixstrings.go b/typedapi/types/trainedmodelprefixstrings.go index 85422fc0e1..a57b43452d 100644 --- a/typedapi/types/trainedmodelprefixstrings.go +++ b/typedapi/types/trainedmodelprefixstrings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TrainedModelPrefixStrings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L428-L437 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L486-L495 type TrainedModelPrefixStrings struct { // Ingest String prepended to input at ingest Ingest *string `json:"ingest,omitempty"` @@ -89,3 +89,13 @@ func NewTrainedModelPrefixStrings() *TrainedModelPrefixStrings { return r } + +// true + +type TrainedModelPrefixStringsVariant interface { + TrainedModelPrefixStringsCaster() *TrainedModelPrefixStrings +} + +func (s *TrainedModelPrefixStrings) TrainedModelPrefixStringsCaster() *TrainedModelPrefixStrings { + return s +} diff --git a/typedapi/types/trainedmodelsizestats.go b/typedapi/types/trainedmodelsizestats.go index c15d8c8742..1a6f9c2132 100644 --- a/typedapi/types/trainedmodelsizestats.go +++ b/typedapi/types/trainedmodelsizestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TrainedModelSizeStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L126-L131 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L149-L154 type TrainedModelSizeStats struct { // ModelSizeBytes The size of the model in bytes. 
ModelSizeBytes ByteSize `json:"model_size_bytes"` @@ -74,3 +74,5 @@ func NewTrainedModelSizeStats() *TrainedModelSizeStats { return r } + +// false diff --git a/typedapi/types/trainedmodelsrecord.go b/typedapi/types/trainedmodelsrecord.go index 0c323bd17c..ea8a995d9c 100644 --- a/typedapi/types/trainedmodelsrecord.go +++ b/typedapi/types/trainedmodelsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TrainedModelsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/ml_trained_models/types.ts#L23-L115 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/ml_trained_models/types.ts#L23-L115 type TrainedModelsRecord struct { // CreateTime The time the model was created. CreateTime DateTime `json:"create_time,omitempty"` @@ -286,3 +286,5 @@ func NewTrainedModelsRecord() *TrainedModelsRecord { return r } + +// false diff --git a/typedapi/types/trainedmodelstats.go b/typedapi/types/trainedmodelstats.go index e1707c7d37..d719ec07db 100644 --- a/typedapi/types/trainedmodelstats.go +++ b/typedapi/types/trainedmodelstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TrainedModelStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/TrainedModel.ts#L42-L60 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/TrainedModel.ts#L42-L60 type TrainedModelStats struct { // DeploymentStats A collection of deployment stats, which is present when the models are // deployed. @@ -117,8 +117,10 @@ func (s *TrainedModelStats) UnmarshalJSON(data []byte) error { // NewTrainedModelStats returns a TrainedModelStats. func NewTrainedModelStats() *TrainedModelStats { r := &TrainedModelStats{ - Ingest: make(map[string]json.RawMessage, 0), + Ingest: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/trainedmodeltree.go b/typedapi/types/trainedmodeltree.go index 100270afb3..c78140ca8c 100644 --- a/typedapi/types/trainedmodeltree.go +++ b/typedapi/types/trainedmodeltree.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TrainedModelTree type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/types.ts#L74-L79 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/types.ts#L74-L79 type TrainedModelTree struct { ClassificationLabels []string `json:"classification_labels,omitempty"` FeatureNames []string `json:"feature_names"` @@ -92,3 +92,13 @@ func NewTrainedModelTree() *TrainedModelTree { return r } + +// true + +type TrainedModelTreeVariant interface { + TrainedModelTreeCaster() *TrainedModelTree +} + +func (s *TrainedModelTree) TrainedModelTreeCaster() *TrainedModelTree { + return s +} diff --git a/typedapi/types/trainedmodeltreenode.go b/typedapi/types/trainedmodeltreenode.go index b37ffde5a3..ec700b6a21 100644 --- a/typedapi/types/trainedmodeltreenode.go +++ b/typedapi/types/trainedmodeltreenode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TrainedModelTreeNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/types.ts#L81-L91 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/types.ts#L81-L91 type TrainedModelTreeNode struct { DecisionType *string `json:"decision_type,omitempty"` DefaultLeft *bool `json:"default_left,omitempty"` @@ -208,3 +208,13 @@ func NewTrainedModelTreeNode() *TrainedModelTreeNode { return r } + +// true + +type TrainedModelTreeNodeVariant interface { + TrainedModelTreeNodeCaster() *TrainedModelTreeNode +} + +func (s *TrainedModelTreeNode) TrainedModelTreeNodeCaster() *TrainedModelTreeNode { + return s +} diff --git a/typedapi/types/transformauthorization.go b/typedapi/types/transformauthorization.go index d42acfe991..041b687d39 100644 --- a/typedapi/types/transformauthorization.go +++ b/typedapi/types/transformauthorization.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TransformAuthorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/Authorization.ts#L59-L71 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/Authorization.ts#L59-L71 type TransformAuthorization struct { // ApiKey If an API key was used for the most recent update to the transform, its name // and identifier are listed in the response. 
@@ -92,3 +92,5 @@ func NewTransformAuthorization() *TransformAuthorization { return r } + +// false diff --git a/typedapi/types/transformcontainer.go b/typedapi/types/transformcontainer.go index 6f12e69415..dfaa72d675 100644 --- a/typedapi/types/transformcontainer.go +++ b/typedapi/types/transformcontainer.go @@ -16,22 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // TransformContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Transform.ts#L27-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Transform.ts#L27-L34 type TransformContainer struct { - Chain []TransformContainer `json:"chain,omitempty"` - Script *ScriptTransform `json:"script,omitempty"` - Search *SearchTransform `json:"search,omitempty"` + AdditionalTransformContainerProperty map[string]json.RawMessage `json:"-"` + Chain []TransformContainer `json:"chain,omitempty"` + Script *ScriptTransform `json:"script,omitempty"` + Search *SearchTransform `json:"search,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s TransformContainer) MarshalJSON() ([]byte, error) { + type opt TransformContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range 
s.AdditionalTransformContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalTransformContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewTransformContainer returns a TransformContainer. func NewTransformContainer() *TransformContainer { - r := &TransformContainer{} + r := &TransformContainer{ + AdditionalTransformContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type TransformContainerVariant interface { + TransformContainerCaster() *TransformContainer +} + +func (s *TransformContainer) TransformContainerCaster() *TransformContainer { + return s +} diff --git a/typedapi/types/transformdestination.go b/typedapi/types/transformdestination.go index 8589d876be..f9311ea02d 100644 --- a/typedapi/types/transformdestination.go +++ b/typedapi/types/transformdestination.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TransformDestination type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/_types/Transform.ts#L34-L45 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/_types/Transform.ts#L34-L45 type TransformDestination struct { // Index The destination index for the transform. 
The mappings of the destination // index are deduced based on the source @@ -86,3 +86,13 @@ func NewTransformDestination() *TransformDestination { return r } + +// true + +type TransformDestinationVariant interface { + TransformDestinationCaster() *TransformDestination +} + +func (s *TransformDestination) TransformDestinationCaster() *TransformDestination { + return s +} diff --git a/typedapi/types/transformindexerstats.go b/typedapi/types/transformindexerstats.go index 5151f7523b..feb2476bc4 100644 --- a/typedapi/types/transformindexerstats.go +++ b/typedapi/types/transformindexerstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TransformIndexerStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/get_transform_stats/types.ts#L56-L74 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/get_transform_stats/types.ts#L56-L74 type TransformIndexerStats struct { DeleteTimeInMs *int64 `json:"delete_time_in_ms,omitempty"` DocumentsDeleted *int64 `json:"documents_deleted,omitempty"` @@ -285,3 +285,5 @@ func NewTransformIndexerStats() *TransformIndexerStats { return r } + +// false diff --git a/typedapi/types/transformprogress.go b/typedapi/types/transformprogress.go index 3443c5ea70..f53d5f00ff 100644 --- a/typedapi/types/transformprogress.go +++ b/typedapi/types/transformprogress.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TransformProgress type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/get_transform_stats/types.ts#L48-L54 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/get_transform_stats/types.ts#L48-L54 type TransformProgress struct { DocsIndexed int64 `json:"docs_indexed"` DocsProcessed int64 `json:"docs_processed"` @@ -142,3 +142,5 @@ func NewTransformProgress() *TransformProgress { return r } + +// false diff --git a/typedapi/types/transformsource.go b/typedapi/types/transformsource.go index e5f4dc5c4c..0081eb8dde 100644 --- a/typedapi/types/transformsource.go +++ b/typedapi/types/transformsource.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TransformSource type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/_types/Transform.ts#L146-L165 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/_types/Transform.ts#L146-L165 type TransformSource struct { // Index The source indices for the transform. 
It can be a single index, an index // pattern (for example, `"my-index-*""`), an @@ -101,3 +101,13 @@ func NewTransformSource() *TransformSource { return r } + +// true + +type TransformSourceVariant interface { + TransformSourceCaster() *TransformSource +} + +func (s *TransformSource) TransformSourceCaster() *TransformSource { + return s +} diff --git a/typedapi/types/transformsrecord.go b/typedapi/types/transformsrecord.go index 43a278ee6c..c1c54599e6 100644 --- a/typedapi/types/transformsrecord.go +++ b/typedapi/types/transformsrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TransformsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cat/transforms/types.ts#L22-L197 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cat/transforms/types.ts#L22-L197 type TransformsRecord struct { // ChangesLastDetectionTime The timestamp when changes were last detected in the source indices. ChangesLastDetectionTime *string `json:"changes_last_detection_time,omitempty"` @@ -533,3 +533,5 @@ func NewTransformsRecord() *TransformsRecord { return r } + +// false diff --git a/typedapi/types/transformstats.go b/typedapi/types/transformstats.go index 8e248c9ead..e9fc1f6d71 100644 --- a/typedapi/types/transformstats.go +++ b/typedapi/types/transformstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TransformStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/get_transform_stats/types.ts#L31-L42 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/get_transform_stats/types.ts#L31-L42 type TransformStats struct { Checkpointing Checkpointing `json:"checkpointing"` Health *TransformStatsHealth `json:"health,omitempty"` @@ -117,3 +117,5 @@ func NewTransformStats() *TransformStats { return r } + +// false diff --git a/typedapi/types/transformstatshealth.go b/typedapi/types/transformstatshealth.go index 138a8cc18a..85c16e2e19 100644 --- a/typedapi/types/transformstatshealth.go +++ b/typedapi/types/transformstatshealth.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // TransformStatsHealth type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/get_transform_stats/types.ts#L44-L46 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/get_transform_stats/types.ts#L44-L46 type TransformStatsHealth struct { Status healthstatus.HealthStatus `json:"status"` } @@ -37,3 +37,5 @@ func NewTransformStatsHealth() *TransformStatsHealth { return r } + +// false diff --git a/typedapi/types/transformsummary.go b/typedapi/types/transformsummary.go index 389e470756..447e908973 100644 --- a/typedapi/types/transformsummary.go +++ b/typedapi/types/transformsummary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TransformSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/transform/get_transform/types.ts#L33-L61 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/transform/get_transform/types.ts#L33-L61 type TransformSummary struct { // Authorization The security privileges that the transform uses to run its queries. If // Elastic Stack security features were disabled at the time of the most recent @@ -164,3 +164,5 @@ func NewTransformSummary() *TransformSummary { return r } + +// false diff --git a/typedapi/types/translog.go b/typedapi/types/translog.go index a0d66bd7cb..68e96e0cd2 100644 --- a/typedapi/types/translog.go +++ b/typedapi/types/translog.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // Translog type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L341-L363 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L355-L377 type Translog struct { // Durability Whether or not to `fsync` and commit the translog after every index, delete, // update, or bulk request. @@ -101,3 +101,13 @@ func NewTranslog() *Translog { return r } + +// true + +type TranslogVariant interface { + TranslogCaster() *Translog +} + +func (s *Translog) TranslogCaster() *Translog { + return s +} diff --git a/typedapi/types/translogretention.go b/typedapi/types/translogretention.go index ee7afcff63..6179021569 100644 --- a/typedapi/types/translogretention.go +++ b/typedapi/types/translogretention.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TranslogRetention type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/_types/IndexSettings.ts#L382-L401 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/_types/IndexSettings.ts#L396-L415 type TranslogRetention struct { // Age This controls the maximum duration for which translog files are kept by each // shard. 
Keeping more @@ -90,3 +90,13 @@ func NewTranslogRetention() *TranslogRetention { return r } + +// true + +type TranslogRetentionVariant interface { + TranslogRetentionCaster() *TranslogRetention +} + +func (s *TranslogRetention) TranslogRetentionCaster() *TranslogRetention { + return s +} diff --git a/typedapi/types/translogstats.go b/typedapi/types/translogstats.go index ba09c16654..34a930761c 100644 --- a/typedapi/types/translogstats.go +++ b/typedapi/types/translogstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TranslogStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L397-L405 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L400-L408 type TranslogStats struct { EarliestLastModifiedAge int64 `json:"earliest_last_modified_age"` Operations int64 `json:"operations"` @@ -168,3 +168,5 @@ func NewTranslogStats() *TranslogStats { return r } + +// false diff --git a/typedapi/types/translogstatus.go b/typedapi/types/translogstatus.go index a721500b9b..71c0416f29 100644 --- a/typedapi/types/translogstatus.go +++ b/typedapi/types/translogstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TranslogStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/recovery/types.ts#L102-L109 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/recovery/types.ts#L102-L109 type TranslogStatus struct { Percent Percentage `json:"percent"` Recovered int64 `json:"recovered"` @@ -127,3 +127,5 @@ func NewTranslogStatus() *TranslogStatus { return r } + +// false diff --git a/typedapi/types/transport.go b/typedapi/types/transport.go index 688aaa310b..ff5b762d2a 100644 --- a/typedapi/types/transport.go +++ b/typedapi/types/transport.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Transport type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L1047-L1090 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L1118-L1161 type Transport struct { // InboundHandlingTimeHistogram The distribution of the time spent handling each inbound message on a // transport thread, represented as a histogram. @@ -219,3 +219,5 @@ func NewTransport() *Transport { return r } + +// false diff --git a/typedapi/types/transporthistogram.go b/typedapi/types/transporthistogram.go index 6918412e95..bcb8caed3c 100644 --- a/typedapi/types/transporthistogram.go +++ b/typedapi/types/transporthistogram.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TransportHistogram type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/nodes/_types/Stats.ts#L1092-L1106 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/nodes/_types/Stats.ts#L1163-L1177 type TransportHistogram struct { // Count The number of times a transport thread took a period of time within the // bounds of this bucket to handle an inbound message. @@ -115,3 +115,5 @@ func NewTransportHistogram() *TransportHistogram { return r } + +// false diff --git a/typedapi/types/triggercontainer.go b/typedapi/types/triggercontainer.go index 43f602ec46..0cca89e158 100644 --- a/typedapi/types/triggercontainer.go +++ b/typedapi/types/triggercontainer.go @@ -16,20 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // TriggerContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Trigger.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Trigger.ts#L23-L28 type TriggerContainer struct { - Schedule *ScheduleContainer `json:"schedule,omitempty"` + AdditionalTriggerContainerProperty map[string]json.RawMessage `json:"-"` + Schedule *ScheduleContainer `json:"schedule,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s TriggerContainer) MarshalJSON() ([]byte, error) { + type opt TriggerContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalTriggerContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalTriggerContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewTriggerContainer returns a TriggerContainer. func NewTriggerContainer() *TriggerContainer { - r := &TriggerContainer{} + r := &TriggerContainer{ + AdditionalTriggerContainerProperty: make(map[string]json.RawMessage), + } return r } + +// true + +type TriggerContainerVariant interface { + TriggerContainerCaster() *TriggerContainer +} + +func (s *TriggerContainer) TriggerContainerCaster() *TriggerContainer { + return s +} diff --git a/typedapi/types/triggereventcontainer.go b/typedapi/types/triggereventcontainer.go index 869f5dd51d..3a1993d3f5 100644 --- a/typedapi/types/triggereventcontainer.go +++ b/typedapi/types/triggereventcontainer.go @@ -16,20 +16,59 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types +import ( + "encoding/json" + "fmt" +) + // TriggerEventContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Trigger.ts#L32-L37 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Trigger.ts#L32-L37 type TriggerEventContainer struct { - Schedule *ScheduleTriggerEvent `json:"schedule,omitempty"` + AdditionalTriggerEventContainerProperty map[string]json.RawMessage `json:"-"` + Schedule *ScheduleTriggerEvent `json:"schedule,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s TriggerEventContainer) MarshalJSON() ([]byte, error) { + type opt TriggerEventContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalTriggerEventContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalTriggerEventContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewTriggerEventContainer returns a TriggerEventContainer. 
func NewTriggerEventContainer() *TriggerEventContainer { - r := &TriggerEventContainer{} + r := &TriggerEventContainer{ + AdditionalTriggerEventContainerProperty: make(map[string]json.RawMessage), + } return r } + +// false diff --git a/typedapi/types/triggereventresult.go b/typedapi/types/triggereventresult.go index 3873f9250c..909f161567 100644 --- a/typedapi/types/triggereventresult.go +++ b/typedapi/types/triggereventresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TriggerEventResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Trigger.ts#L39-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Trigger.ts#L39-L43 type TriggerEventResult struct { Manual TriggerEventContainer `json:"manual"` TriggeredTime DateTime `json:"triggered_time"` @@ -86,3 +86,5 @@ func NewTriggerEventResult() *TriggerEventResult { return r } + +// false diff --git a/typedapi/types/trimprocessor.go b/typedapi/types/trimprocessor.go index 1dd1a03087..0c45f8f850 100644 --- a/typedapi/types/trimprocessor.go +++ b/typedapi/types/trimprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TrimProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L1124-L1140 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1525-L1541 type TrimProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -160,3 +160,13 @@ func NewTrimProcessor() *TrimProcessor { return r } + +// true + +type TrimProcessorVariant interface { + TrimProcessorCaster() *TrimProcessor +} + +func (s *TrimProcessor) TrimProcessorCaster() *TrimProcessor { + return s +} diff --git a/typedapi/types/trimtokenfilter.go b/typedapi/types/trimtokenfilter.go index f9f5f3620c..246fd50513 100644 --- a/typedapi/types/trimtokenfilter.go +++ b/typedapi/types/trimtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // TrimTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L328-L330 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L327-L329 type TrimTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewTrimTokenFilter() *TrimTokenFilter { return r } + +// true + +type TrimTokenFilterVariant interface { + TrimTokenFilterCaster() *TrimTokenFilter +} + +func (s *TrimTokenFilter) TrimTokenFilterCaster() *TrimTokenFilter { + return s +} diff --git a/typedapi/types/truncatetokenfilter.go b/typedapi/types/truncatetokenfilter.go index 01aac8ea68..df96f1b5d3 100644 --- a/typedapi/types/truncatetokenfilter.go +++ b/typedapi/types/truncatetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TruncateTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L332-L335 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L331-L334 type TruncateTokenFilter struct { Length *int `json:"length,omitempty"` Type string `json:"type,omitempty"` @@ -104,3 +104,13 @@ func NewTruncateTokenFilter() *TruncateTokenFilter { return r } + +// true + +type TruncateTokenFilterVariant interface { + TruncateTokenFilterCaster() *TruncateTokenFilter +} + +func (s *TruncateTokenFilter) TruncateTokenFilterCaster() *TruncateTokenFilter { + return s +} diff --git a/typedapi/types/ttestaggregate.go b/typedapi/types/ttestaggregate.go index b88d814dda..46320dcd5e 100644 --- a/typedapi/types/ttestaggregate.go +++ b/typedapi/types/ttestaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TTestAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L742-L746 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L838-L845 type TTestAggregate struct { Meta Metadata `json:"meta,omitempty"` Value *Float64 `json:"value,omitempty"` @@ -86,3 +86,5 @@ func NewTTestAggregate() *TTestAggregate { return r } + +// false diff --git a/typedapi/types/ttestaggregation.go b/typedapi/types/ttestaggregation.go index ae2ec7e096..819f29e6c7 100644 --- a/typedapi/types/ttestaggregation.go +++ b/typedapi/types/ttestaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -26,7 +26,7 @@ import ( // TTestAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L294-L308 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L303-L317 type TTestAggregation struct { // A Test population A. 
A *TestPopulation `json:"a,omitempty"` @@ -42,3 +42,13 @@ func NewTTestAggregation() *TTestAggregation { return r } + +// true + +type TTestAggregationVariant interface { + TTestAggregationCaster() *TTestAggregation +} + +func (s *TTestAggregation) TTestAggregationCaster() *TTestAggregation { + return s +} diff --git a/typedapi/types/turkishanalyzer.go b/typedapi/types/turkishanalyzer.go new file mode 100644 index 0000000000..dfabc1a502 --- /dev/null +++ b/typedapi/types/turkishanalyzer.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TurkishAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L290-L295 +type TurkishAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords []string `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *TurkishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s TurkishAnalyzer) MarshalJSON() ([]byte, error) { + type innerTurkishAnalyzer TurkishAnalyzer + tmp := innerTurkishAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "turkish" + + 
return json.Marshal(tmp) +} + +// NewTurkishAnalyzer returns a TurkishAnalyzer. +func NewTurkishAnalyzer() *TurkishAnalyzer { + r := &TurkishAnalyzer{} + + return r +} + +// true + +type TurkishAnalyzerVariant interface { + TurkishAnalyzerCaster() *TurkishAnalyzer +} + +func (s *TurkishAnalyzer) TurkishAnalyzerCaster() *TurkishAnalyzer { + return s +} diff --git a/typedapi/types/typefieldmappings.go b/typedapi/types/typefieldmappings.go index b875c000c9..5e0b4b6e3c 100644 --- a/typedapi/types/typefieldmappings.go +++ b/typedapi/types/typefieldmappings.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // TypeFieldMappings type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/get_field_mapping/types.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/get_field_mapping/types.ts#L24-L26 type TypeFieldMappings struct { Mappings map[string]FieldMapping `json:"mappings"` } @@ -30,8 +30,10 @@ type TypeFieldMappings struct { // NewTypeFieldMappings returns a TypeFieldMappings. func NewTypeFieldMappings() *TypeFieldMappings { r := &TypeFieldMappings{ - Mappings: make(map[string]FieldMapping, 0), + Mappings: make(map[string]FieldMapping), } return r } + +// false diff --git a/typedapi/types/typemapping.go b/typedapi/types/typemapping.go index 6f13c8a870..aafbbe37b7 100644 --- a/typedapi/types/typemapping.go +++ b/typedapi/types/typemapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/subobjects" ) // TypeMapping type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/TypeMapping.ts#L34-L57 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/TypeMapping.ts#L34-L57 type TypeMapping struct { AllField *AllField `json:"all_field,omitempty"` DataStreamTimestamp_ *DataStreamTimestamp `json:"_data_stream_timestamp,omitempty"` @@ -51,7 +52,7 @@ type TypeMapping struct { Runtime map[string]RuntimeField `json:"runtime,omitempty"` Size_ *SizeField `json:"_size,omitempty"` Source_ *SourceField `json:"_source,omitempty"` - Subobjects *bool `json:"subobjects,omitempty"` + Subobjects *subobjects.Subobjects `json:"subobjects,omitempty"` } func (s *TypeMapping) UnmarshalJSON(data []byte) error { @@ -170,301 +171,313 @@ func (s *TypeMapping) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := 
NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", 
err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -494,17 +507,8 @@ func (s *TypeMapping) UnmarshalJSON(data []byte) error { } case "subobjects": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "Subobjects", err) - } - s.Subobjects = &value - case bool: - s.Subobjects = &v + if err := dec.Decode(&s.Subobjects); err != nil { + return fmt.Errorf("%s | %w", "Subobjects", err) } } @@ -515,9 +519,19 @@ func (s *TypeMapping) UnmarshalJSON(data []byte) error { // NewTypeMapping returns a TypeMapping. func NewTypeMapping() *TypeMapping { r := &TypeMapping{ - Properties: make(map[string]Property, 0), - Runtime: make(map[string]RuntimeField, 0), + Properties: make(map[string]Property), + Runtime: make(map[string]RuntimeField), } return r } + +// true + +type TypeMappingVariant interface { + TypeMappingCaster() *TypeMapping +} + +func (s *TypeMapping) TypeMappingCaster() *TypeMapping { + return s +} diff --git a/typedapi/types/typequery.go b/typedapi/types/typequery.go index a1f7cfbb7f..34b2c605f3 100644 --- a/typedapi/types/typequery.go +++ b/typedapi/types/typequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // TypeQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L269-L271 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L301-L303 type TypeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -109,3 +109,13 @@ func NewTypeQuery() *TypeQuery { return r } + +// true + +type TypeQueryVariant interface { + TypeQueryCaster() *TypeQuery +} + +func (s *TypeQuery) TypeQueryCaster() *TypeQuery { + return s +} diff --git a/typedapi/types/uaxemailurltokenizer.go b/typedapi/types/uaxemailurltokenizer.go index 2125c5635c..fe43a09b1c 100644 --- a/typedapi/types/uaxemailurltokenizer.go +++ b/typedapi/types/uaxemailurltokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // UaxEmailUrlTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L110-L113 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L130-L133 type UaxEmailUrlTokenizer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` Type string `json:"type,omitempty"` @@ -104,3 +104,13 @@ func NewUaxEmailUrlTokenizer() *UaxEmailUrlTokenizer { return r } + +// true + +type UaxEmailUrlTokenizerVariant interface { + UaxEmailUrlTokenizerCaster() *UaxEmailUrlTokenizer +} + +func (s *UaxEmailUrlTokenizer) UaxEmailUrlTokenizerCaster() *UaxEmailUrlTokenizer { + return s +} diff --git a/typedapi/types/unassignedinformation.go b/typedapi/types/unassignedinformation.go index a0bedd976e..c899ebf87f 100644 --- a/typedapi/types/unassignedinformation.go +++ b/typedapi/types/unassignedinformation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // UnassignedInformation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/cluster/allocation_explain/types.ts#L117-L125 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/cluster/allocation_explain/types.ts#L128-L136 type UnassignedInformation struct { AllocationStatus *string `json:"allocation_status,omitempty"` At DateTime `json:"at"` @@ -146,3 +146,5 @@ func NewUnassignedInformation() *UnassignedInformation { return r } + +// false diff --git a/typedapi/types/uniquetokenfilter.go b/typedapi/types/uniquetokenfilter.go index b0c279624e..2d29b8ce15 100644 --- a/typedapi/types/uniquetokenfilter.go +++ b/typedapi/types/uniquetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // UniqueTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L337-L340 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L336-L339 type UniqueTokenFilter struct { OnlyOnSamePosition *bool `json:"only_on_same_position,omitempty"` Type string `json:"type,omitempty"` @@ -102,3 +102,13 @@ func NewUniqueTokenFilter() *UniqueTokenFilter { return r } + +// true + +type UniqueTokenFilterVariant interface { + UniqueTokenFilterCaster() *UniqueTokenFilter +} + +func (s *UniqueTokenFilter) UniqueTokenFilterCaster() *UniqueTokenFilter { + return s +} diff --git a/typedapi/types/unmappedraretermsaggregate.go b/typedapi/types/unmappedraretermsaggregate.go index 74fcb80946..222bbb93a6 100644 --- a/typedapi/types/unmappedraretermsaggregate.go +++ b/typedapi/types/unmappedraretermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // UnmappedRareTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L455-L461 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L493-L499 type UnmappedRareTermsAggregate struct { Buckets BucketsVoid `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewUnmappedRareTermsAggregate() *UnmappedRareTermsAggregate { return r } + +// false diff --git a/typedapi/types/unmappedsampleraggregate.go b/typedapi/types/unmappedsampleraggregate.go index a7be9ae8ba..704e056efa 100644 --- a/typedapi/types/unmappedsampleraggregate.go +++ b/typedapi/types/unmappedsampleraggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // UnmappedSamplerAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L505-L506 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L561-L562 type UnmappedSamplerAggregate struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -491,6 +491,13 @@ func (s *UnmappedSamplerAggregate) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -624,8 +631,10 @@ func (s UnmappedSamplerAggregate) MarshalJSON() ([]byte, error) { // NewUnmappedSamplerAggregate returns a UnmappedSamplerAggregate. func NewUnmappedSamplerAggregate() *UnmappedSamplerAggregate { r := &UnmappedSamplerAggregate{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/unmappedsignificanttermsaggregate.go b/typedapi/types/unmappedsignificanttermsaggregate.go index b024609d31..64c728ea1e 100644 --- a/typedapi/types/unmappedsignificanttermsaggregate.go +++ b/typedapi/types/unmappedsignificanttermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // UnmappedSignificantTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L614-L620 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L690-L696 type UnmappedSignificantTermsAggregate struct { BgCount *int64 `json:"bg_count,omitempty"` Buckets BucketsVoid `json:"buckets"` @@ -121,3 +121,5 @@ func NewUnmappedSignificantTermsAggregate() *UnmappedSignificantTermsAggregate { return r } + +// false diff --git a/typedapi/types/unmappedtermsaggregate.go b/typedapi/types/unmappedtermsaggregate.go index a1b9a5d46f..cfee3121d8 100644 --- a/typedapi/types/unmappedtermsaggregate.go +++ b/typedapi/types/unmappedtermsaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // UnmappedTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L425-L431 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L463-L469 type UnmappedTermsAggregate struct { Buckets BucketsVoid `json:"buckets"` DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` @@ -121,3 +121,5 @@ func NewUnmappedTermsAggregate() *UnmappedTermsAggregate { return r } + +// false diff --git a/typedapi/types/unrateddocument.go b/typedapi/types/unrateddocument.go index 7fcd028240..3512c11faa 100644 --- a/typedapi/types/unrateddocument.go +++ b/typedapi/types/unrateddocument.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // UnratedDocument type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/rank_eval/types.ts#L150-L153 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/rank_eval/types.ts#L150-L153 type UnratedDocument struct { Id_ string `json:"_id"` Index_ string `json:"_index"` @@ -72,3 +72,5 @@ func NewUnratedDocument() *UnratedDocument { return r } + +// false diff --git a/typedapi/types/unsignedlongnumberproperty.go b/typedapi/types/unsignedlongnumberproperty.go index 5f34ecb0da..c792d97c7f 100644 --- a/typedapi/types/unsignedlongnumberproperty.go +++ b/typedapi/types/unsignedlongnumberproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,12 +30,13 @@ import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // UnsignedLongNumberProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L177-L180 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L181-L184 type UnsignedLongNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,13 +48,13 @@ type UnsignedLongNumberProperty struct { IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - NullValue *uint64 `json:"null_value,omitempty"` - OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Script *Script `json:"script,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *uint64 `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. // Defaults to false. TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` @@ -162,301 +163,313 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err 
:= localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() 
if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -543,301 +556,313 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } 
s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := 
NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } @@ -848,18 +873,6 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Script", err) } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -874,6 +887,11 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "time_series_dimension": var tmp any dec.Decode(&tmp) @@ -921,8 +939,8 @@ func (s UnsignedLongNumberProperty) MarshalJSON() ([]byte, error) { OnScriptError: s.OnScriptError, Properties: s.Properties, Script: s.Script, - Similarity: s.Similarity, Store: 
s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, TimeSeriesDimension: s.TimeSeriesDimension, TimeSeriesMetric: s.TimeSeriesMetric, Type: s.Type, @@ -936,10 +954,20 @@ func (s UnsignedLongNumberProperty) MarshalJSON() ([]byte, error) { // NewUnsignedLongNumberProperty returns a UnsignedLongNumberProperty. func NewUnsignedLongNumberProperty() *UnsignedLongNumberProperty { r := &UnsignedLongNumberProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type UnsignedLongNumberPropertyVariant interface { + UnsignedLongNumberPropertyCaster() *UnsignedLongNumberProperty +} + +func (s *UnsignedLongNumberProperty) UnsignedLongNumberPropertyCaster() *UnsignedLongNumberProperty { + return s +} diff --git a/typedapi/types/untypeddecayfunction.go b/typedapi/types/untypeddecayfunction.go index 243a6b52cb..fb598dab4c 100644 --- a/typedapi/types/untypeddecayfunction.go +++ b/typedapi/types/untypeddecayfunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,7 +29,7 @@ import ( // UntypedDecayFunction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/compound.ts#L191-L194 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/compound.ts#L204-L207 type UntypedDecayFunction struct { DecayFunctionBase map[string]DecayPlacement `json:"-"` // MultiValueMode Determines how the distance is calculated when a field used for computing the @@ -69,8 +69,18 @@ func (s UntypedDecayFunction) MarshalJSON() ([]byte, error) { // NewUntypedDecayFunction returns a UntypedDecayFunction. func NewUntypedDecayFunction() *UntypedDecayFunction { r := &UntypedDecayFunction{ - DecayFunctionBase: make(map[string]DecayPlacement, 0), + DecayFunctionBase: make(map[string]DecayPlacement), } return r } + +// true + +type UntypedDecayFunctionVariant interface { + UntypedDecayFunctionCaster() *UntypedDecayFunction +} + +func (s *UntypedDecayFunction) UntypedDecayFunctionCaster() *UntypedDecayFunction { + return s +} diff --git a/typedapi/types/untypeddistancefeaturequery.go b/typedapi/types/untypeddistancefeaturequery.go index eac3468550..5701dddb66 100644 --- a/typedapi/types/untypeddistancefeaturequery.go +++ b/typedapi/types/untypeddistancefeaturequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // UntypedDistanceFeatureQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/specialized.ts#L62-L65 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/specialized.ts#L61-L64 type UntypedDistanceFeatureQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -131,3 +131,13 @@ func NewUntypedDistanceFeatureQuery() *UntypedDistanceFeatureQuery { return r } + +// true + +type UntypedDistanceFeatureQueryVariant interface { + UntypedDistanceFeatureQueryCaster() *UntypedDistanceFeatureQuery +} + +func (s *UntypedDistanceFeatureQuery) UntypedDistanceFeatureQueryCaster() *UntypedDistanceFeatureQuery { + return s +} diff --git a/typedapi/types/untypedrangequery.go b/typedapi/types/untypedrangequery.go index 123575c57f..33fd15414f 100644 --- a/typedapi/types/untypedrangequery.go +++ b/typedapi/types/untypedrangequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // UntypedRangeQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L135-L144 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L150-L159 type UntypedRangeQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -160,3 +160,13 @@ func NewUntypedRangeQuery() *UntypedRangeQuery { return r } + +// true + +type UntypedRangeQueryVariant interface { + UntypedRangeQueryCaster() *UntypedRangeQuery +} + +func (s *UntypedRangeQuery) UntypedRangeQueryCaster() *UntypedRangeQuery { + return s +} diff --git a/typedapi/types/updateaction.go b/typedapi/types/updateaction.go index 83ffea2499..7eb1d30c0e 100644 --- a/typedapi/types/updateaction.go +++ b/typedapi/types/updateaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,26 +31,26 @@ import ( // UpdateAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/bulk/types.ts#L169-L205 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/bulk/types.ts#L182-L217 type UpdateAction struct { - // DetectNoop Set to false to disable setting 'result' in the response - // to 'noop' if no change to the document occurred. + // DetectNoop If true, the `result` in the response is set to 'noop' when no changes to the + // document occur. DetectNoop *bool `json:"detect_noop,omitempty"` // Doc A partial update to an existing document. Doc json.RawMessage `json:"doc,omitempty"` - // DocAsUpsert Set to true to use the contents of 'doc' as the value of 'upsert' + // DocAsUpsert Set to `true` to use the contents of `doc` as the value of `upsert`. DocAsUpsert *bool `json:"doc_as_upsert,omitempty"` - // Script Script to execute to update the document. + // Script The script to run to update the document. 
Script *Script `json:"script,omitempty"` - // ScriptedUpsert Set to true to execute the script whether or not the document exists. + // ScriptedUpsert Set to `true` to run the script whether or not the document exists. ScriptedUpsert *bool `json:"scripted_upsert,omitempty"` - // Source_ Set to false to disable source retrieval. You can also specify a - // comma-separated - // list of the fields you want to retrieve. + // Source_ If `false`, source retrieval is turned off. + // You can also specify a comma-separated list of the fields you want to + // retrieve. Source_ SourceConfig `json:"_source,omitempty"` - // Upsert If the document does not already exist, the contents of 'upsert' are inserted - // as a - // new document. If the document exists, the 'script' is executed. + // Upsert If the document does not already exist, the contents of `upsert` are inserted + // as a new document. + // If the document exists, the `script` is run. Upsert json.RawMessage `json:"upsert,omitempty"` } @@ -173,3 +173,13 @@ func NewUpdateAction() *UpdateAction { return r } + +// true + +type UpdateActionVariant interface { + UpdateActionCaster() *UpdateAction +} + +func (s *UpdateAction) UpdateActionCaster() *UpdateAction { + return s +} diff --git a/typedapi/types/updatebyqueryrethrottlenode.go b/typedapi/types/updatebyqueryrethrottlenode.go index af0b90b0a6..2d552ec1ed 100644 --- a/typedapi/types/updatebyqueryrethrottlenode.go +++ b/typedapi/types/updatebyqueryrethrottlenode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // UpdateByQueryRethrottleNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleNode.ts#L25-L27 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleNode.ts#L25-L27 type UpdateByQueryRethrottleNode struct { Attributes map[string]string `json:"attributes"` Host string `json:"host"` @@ -107,9 +107,11 @@ func (s *UpdateByQueryRethrottleNode) UnmarshalJSON(data []byte) error { // NewUpdateByQueryRethrottleNode returns a UpdateByQueryRethrottleNode. func NewUpdateByQueryRethrottleNode() *UpdateByQueryRethrottleNode { r := &UpdateByQueryRethrottleNode{ - Attributes: make(map[string]string, 0), - Tasks: make(map[string]TaskInfo, 0), + Attributes: make(map[string]string), + Tasks: make(map[string]TaskInfo), } return r } + +// false diff --git a/typedapi/types/updateoperation.go b/typedapi/types/updateoperation.go index 621c5c5a91..19f35e47c2 100644 --- a/typedapi/types/updateoperation.go +++ b/typedapi/types/updateoperation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,18 +33,20 @@ import ( // UpdateOperation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/bulk/types.ts#L136-L143 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_global/bulk/types.ts#L146-L156 type UpdateOperation struct { // Id_ The document ID. 
Id_ *string `json:"_id,omitempty"` IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` IfSeqNo *int64 `json:"if_seq_no,omitempty"` - // Index_ Name of the index or index alias to perform the action on. + // Index_ The name of the index or index alias to perform the action on. Index_ *string `json:"_index,omitempty"` - // RequireAlias If `true`, the request’s actions must target an index alias. - RequireAlias *bool `json:"require_alias,omitempty"` - RetryOnConflict *int `json:"retry_on_conflict,omitempty"` - // Routing Custom value used to route operations to a specific shard. + // RequireAlias If `true`, the request's actions must target an index alias. + RequireAlias *bool `json:"require_alias,omitempty"` + // RetryOnConflict The number of times an update should be retried in the case of a version + // conflict. + RetryOnConflict *int `json:"retry_on_conflict,omitempty"` + // Routing A custom value used to route operations to a specific shard. Routing *string `json:"routing,omitempty"` Version *int64 `json:"version,omitempty"` VersionType *versiontype.VersionType `json:"version_type,omitempty"` @@ -151,3 +153,13 @@ func NewUpdateOperation() *UpdateOperation { return r } + +// true + +type UpdateOperationVariant interface { + UpdateOperationCaster() *UpdateOperation +} + +func (s *UpdateOperation) UpdateOperationCaster() *UpdateOperation { + return s +} diff --git a/typedapi/types/uppercaseprocessor.go b/typedapi/types/uppercaseprocessor.go index 2ee44c14a7..364e7dfa84 100644 --- a/typedapi/types/uppercaseprocessor.go +++ b/typedapi/types/uppercaseprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // UppercaseProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L1142-L1158 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1543-L1559 type UppercaseProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. @@ -160,3 +160,13 @@ func NewUppercaseProcessor() *UppercaseProcessor { return r } + +// true + +type UppercaseProcessorVariant interface { + UppercaseProcessorCaster() *UppercaseProcessor +} + +func (s *UppercaseProcessor) UppercaseProcessorCaster() *UppercaseProcessor { + return s +} diff --git a/typedapi/types/uppercasetokenfilter.go b/typedapi/types/uppercasetokenfilter.go index c9d024a893..dcd6bbc269 100644 --- a/typedapi/types/uppercasetokenfilter.go +++ b/typedapi/types/uppercasetokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // UppercaseTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L342-L344 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L341-L343 type UppercaseTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewUppercaseTokenFilter() *UppercaseTokenFilter { return r } + +// true + +type UppercaseTokenFilterVariant interface { + UppercaseTokenFilterCaster() *UppercaseTokenFilter +} + +func (s *UppercaseTokenFilter) UppercaseTokenFilterCaster() *UppercaseTokenFilter { + return s +} diff --git a/typedapi/types/uripartsprocessor.go b/typedapi/types/uripartsprocessor.go new file mode 100644 index 0000000000..54bf7da77e --- /dev/null +++ b/typedapi/types/uripartsprocessor.go @@ -0,0 +1,205 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UriPartsProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1579-L1605 +type UriPartsProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field Field containing the URI string. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // KeepOriginal If `true`, the processor copies the unparsed URI to + // `.original`. + KeepOriginal *bool `json:"keep_original,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // RemoveIfSuccessful If `true`, the processor removes the `field` after parsing the URI string. + // If parsing fails, the processor does not remove the `field`. + RemoveIfSuccessful *bool `json:"remove_if_successful,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField Output field for the URI object. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *UriPartsProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "keep_original": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "KeepOriginal", err) + } + s.KeepOriginal = &value + case bool: + s.KeepOriginal = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "remove_if_successful": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, 
err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RemoveIfSuccessful", err) + } + s.RemoveIfSuccessful = &value + case bool: + s.RemoveIfSuccessful = &v + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewUriPartsProcessor returns a UriPartsProcessor. +func NewUriPartsProcessor() *UriPartsProcessor { + r := &UriPartsProcessor{} + + return r +} + +// true + +type UriPartsProcessorVariant interface { + UriPartsProcessorCaster() *UriPartsProcessor +} + +func (s *UriPartsProcessor) UriPartsProcessorCaster() *UriPartsProcessor { + return s +} diff --git a/typedapi/types/urldecodeprocessor.go b/typedapi/types/urldecodeprocessor.go index d2f2bb65a4..c69911108a 100644 --- a/typedapi/types/urldecodeprocessor.go +++ b/typedapi/types/urldecodeprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // UrlDecodeProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L1160-L1176 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L1561-L1577 type UrlDecodeProcessor struct { // Description Description of the processor. 
// Useful for describing the purpose of the processor or its configuration. @@ -160,3 +160,13 @@ func NewUrlDecodeProcessor() *UrlDecodeProcessor { return r } + +// true + +type UrlDecodeProcessorVariant interface { + UrlDecodeProcessorCaster() *UrlDecodeProcessor +} + +func (s *UrlDecodeProcessor) UrlDecodeProcessorCaster() *UrlDecodeProcessor { + return s +} diff --git a/typedapi/types/usagephase.go b/typedapi/types/usagephase.go new file mode 100644 index 0000000000..dd94061be1 --- /dev/null +++ b/typedapi/types/usagephase.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// UsagePhase type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L154-L157 +type UsagePhase struct { + Actions []string `json:"actions"` + MinAge int64 `json:"min_age"` +} + +func (s *UsagePhase) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return fmt.Errorf("%s | %w", "Actions", err) + } + + case "min_age": + if err := dec.Decode(&s.MinAge); err != nil { + return fmt.Errorf("%s | %w", "MinAge", err) + } + + } + } + return nil +} + +// NewUsagePhase returns a UsagePhase. +func NewUsagePhase() *UsagePhase { + r := &UsagePhase{} + + return r +} + +// false diff --git a/typedapi/types/usagephases.go b/typedapi/types/usagephases.go new file mode 100644 index 0000000000..31b6d94990 --- /dev/null +++ b/typedapi/types/usagephases.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +// UsagePhases type. +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L159-L165 +type UsagePhases struct { + Cold *UsagePhase `json:"cold,omitempty"` + Delete *UsagePhase `json:"delete,omitempty"` + Frozen *UsagePhase `json:"frozen,omitempty"` + Hot *UsagePhase `json:"hot,omitempty"` + Warm *UsagePhase `json:"warm,omitempty"` +} + +// NewUsagePhases returns a UsagePhases. +func NewUsagePhases() *UsagePhases { + r := &UsagePhases{} + + return r +} + +// false diff --git a/typedapi/types/usagestatsindex.go b/typedapi/types/usagestatsindex.go index cfd6d4fa08..e374e0efd1 100644 --- a/typedapi/types/usagestatsindex.go +++ b/typedapi/types/usagestatsindex.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // UsageStatsIndex type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L41-L43 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L41-L43 type UsageStatsIndex struct { Shards []UsageStatsShards `json:"shards"` } @@ -33,3 +33,5 @@ func NewUsageStatsIndex() *UsageStatsIndex { return r } + +// false diff --git a/typedapi/types/usagestatsshards.go b/typedapi/types/usagestatsshards.go index 0f5d62c220..00bcff54ec 100644 --- a/typedapi/types/usagestatsshards.go +++ b/typedapi/types/usagestatsshards.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // UsageStatsShards type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L45-L50 type UsageStatsShards struct { Routing ShardRouting `json:"routing"` Stats IndicesShardsStats `json:"stats"` @@ -92,3 +92,5 @@ func NewUsageStatsShards() *UsageStatsShards { return r } + +// false diff --git a/typedapi/types/user.go b/typedapi/types/user.go index 64d7e40836..3c4d6ca05a 100644 --- a/typedapi/types/user.go +++ b/typedapi/types/user.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // User type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/User.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/User.ts#L23-L31 type User struct { Email *string `json:"email,omitempty"` Enabled bool `json:"enabled"` @@ -119,3 +119,5 @@ func NewUser() *User { return r } + +// false diff --git a/typedapi/types/useragentprocessor.go b/typedapi/types/useragentprocessor.go index bf9508b87c..275a7d3776 100644 --- a/typedapi/types/useragentprocessor.go +++ b/typedapi/types/useragentprocessor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,11 +33,13 @@ import ( // UserAgentProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ingest/_types/Processors.ts#L370-L390 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Processors.ts#L514-L545 type UserAgentProcessor struct { // Description Description of the processor. // Useful for describing the purpose of the processor or its configuration. Description *string `json:"description,omitempty"` + // ExtractDeviceType Extracts device type from the user agent string on a best-effort basis. 
+ ExtractDeviceType *bool `json:"extract_device_type,omitempty"` // Field The field containing the user agent string. Field string `json:"field"` // If Conditionally execute the processor. @@ -48,8 +50,9 @@ type UserAgentProcessor struct { // modifying the document. IgnoreMissing *bool `json:"ignore_missing,omitempty"` // OnFailure Handle failures for the processor. - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Options []useragentproperty.UserAgentProperty `json:"options,omitempty"` + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Properties Controls what properties are added to `target_field`. + Properties []useragentproperty.UserAgentProperty `json:"properties,omitempty"` // RegexFile The name of the file in the `config/ingest-user-agent` directory containing // the regular expressions for parsing the user agent string. Both the directory // and the file have to be created before starting Elasticsearch. If not @@ -90,6 +93,20 @@ func (s *UserAgentProcessor) UnmarshalJSON(data []byte) error { } s.Description = &o + case "extract_device_type": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ExtractDeviceType", err) + } + s.ExtractDeviceType = &value + case bool: + s.ExtractDeviceType = &v + } + case "field": if err := dec.Decode(&s.Field); err != nil { return fmt.Errorf("%s | %w", "Field", err) @@ -140,9 +157,9 @@ func (s *UserAgentProcessor) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "OnFailure", err) } - case "options": - if err := dec.Decode(&s.Options); err != nil { - return fmt.Errorf("%s | %w", "Options", err) + case "properties": + if err := dec.Decode(&s.Properties); err != nil { + return fmt.Errorf("%s | %w", "Properties", err) } case "regex_file": @@ -185,3 +202,13 @@ func NewUserAgentProcessor() *UserAgentProcessor { return r } + +// true + +type UserAgentProcessorVariant interface { 
+ UserAgentProcessorCaster() *UserAgentProcessor +} + +func (s *UserAgentProcessor) UserAgentProcessorCaster() *UserAgentProcessor { + return s +} diff --git a/typedapi/types/userindicesprivileges.go b/typedapi/types/userindicesprivileges.go index a75c6d2b87..bbe2f27508 100644 --- a/typedapi/types/userindicesprivileges.go +++ b/typedapi/types/userindicesprivileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // UserIndicesPrivileges type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/Privileges.ts#L223-L245 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/Privileges.ts#L292-L314 type UserIndicesPrivileges struct { // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that // cover restricted indices. 
Implicitly, restricted indices have limited @@ -91,19 +91,8 @@ func (s *UserIndicesPrivileges) UnmarshalJSON(data []byte) error { } case "names": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - if !bytes.HasPrefix(rawMsg, []byte("[")) { - o := new(string) - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { - return fmt.Errorf("%s | %w", "Names", err) - } - - s.Names = append(s.Names, *o) - } else { - if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { - return fmt.Errorf("%s | %w", "Names", err) - } + if err := dec.Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) } case "privileges": @@ -130,7 +119,7 @@ func (s *UserIndicesPrivileges) UnmarshalJSON(data []byte) error { switch t { - case "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", 
"more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": o := NewQuery() localDec := json.NewDecoder(bytes.NewReader(message)) if err := localDec.Decode(&o); err != nil { @@ -170,3 +159,5 @@ func NewUserIndicesPrivileges() *UserIndicesPrivileges { return r } + +// false diff --git a/typedapi/types/userprofile.go b/typedapi/types/userprofile.go index dc332c6084..63bcb1475f 100644 --- a/typedapi/types/userprofile.go +++ b/typedapi/types/userprofile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // UserProfile type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/UserProfile.ts#L42-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/UserProfile.ts#L41-L47 type UserProfile struct { Data map[string]json.RawMessage `json:"data"` Enabled *bool `json:"enabled,omitempty"` @@ -103,9 +103,11 @@ func (s *UserProfile) UnmarshalJSON(data []byte) error { // NewUserProfile returns a UserProfile. 
func NewUserProfile() *UserProfile { r := &UserProfile{ - Data: make(map[string]json.RawMessage, 0), - Labels: make(map[string]json.RawMessage, 0), + Data: make(map[string]json.RawMessage), + Labels: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/userprofilehitmetadata.go b/typedapi/types/userprofilehitmetadata.go index 62f2455947..5d6f7b7b8e 100644 --- a/typedapi/types/userprofilehitmetadata.go +++ b/typedapi/types/userprofilehitmetadata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // UserProfileHitMetadata type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/UserProfile.ts#L28-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/UserProfile.ts#L27-L30 type UserProfileHitMetadata struct { PrimaryTerm_ int64 `json:"_primary_term"` SeqNo_ int64 `json:"_seq_no"` @@ -83,3 +83,5 @@ func NewUserProfileHitMetadata() *UserProfileHitMetadata { return r } + +// false diff --git a/typedapi/types/userprofileuser.go b/typedapi/types/userprofileuser.go index 45041ec4a4..bad8e51904 100644 --- a/typedapi/types/userprofileuser.go +++ b/typedapi/types/userprofileuser.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // UserProfileUser type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/UserProfile.ts#L33-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/UserProfile.ts#L32-L39 type UserProfileUser struct { Email *string `json:"email,omitempty"` FullName *string `json:"full_name,omitempty"` @@ -104,3 +104,5 @@ func NewUserProfileUser() *UserProfileUser { return r } + +// false diff --git a/typedapi/types/userprofilewithmetadata.go b/typedapi/types/userprofilewithmetadata.go index 793322d62e..6a3d97ee0b 100644 --- a/typedapi/types/userprofilewithmetadata.go +++ b/typedapi/types/userprofilewithmetadata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // UserProfileWithMetadata type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/_types/UserProfile.ts#L50-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/_types/UserProfile.ts#L49-L52 type UserProfileWithMetadata struct { Data map[string]json.RawMessage `json:"data"` Doc_ UserProfileHitMetadata `json:"_doc"` @@ -125,9 +125,11 @@ func (s *UserProfileWithMetadata) UnmarshalJSON(data []byte) error { // NewUserProfileWithMetadata returns a UserProfileWithMetadata. 
func NewUserProfileWithMetadata() *UserProfileWithMetadata { r := &UserProfileWithMetadata{ - Data: make(map[string]json.RawMessage, 0), - Labels: make(map[string]json.RawMessage, 0), + Data: make(map[string]json.RawMessage), + Labels: make(map[string]json.RawMessage), } return r } + +// false diff --git a/typedapi/types/userquerycontainer.go b/typedapi/types/userquerycontainer.go index 8e76b85f71..6e21ed5e09 100644 --- a/typedapi/types/userquerycontainer.go +++ b/typedapi/types/userquerycontainer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,8 +30,9 @@ import ( // UserQueryContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/query_user/types.ts#L37-L101 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/query_user/types.ts#L37-L101 type UserQueryContainer struct { + AdditionalUserQueryContainerProperty map[string]json.RawMessage `json:"-"` // Bool matches users matching boolean combinations of other queries. Bool *BoolQuery `json:"bool,omitempty"` // Exists Returns users that contain an indexed value for a field. 
@@ -159,20 +160,73 @@ func (s *UserQueryContainer) UnmarshalJSON(data []byte) error { return fmt.Errorf("%s | %w", "Wildcard", err) } + default: + + if key, ok := t.(string); ok { + if s.AdditionalUserQueryContainerProperty == nil { + s.AdditionalUserQueryContainerProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalUserQueryContainerProperty", err) + } + s.AdditionalUserQueryContainerProperty[key] = *raw + } + } } return nil } +// MarshalJSON overrides marshalling for types with additional properties +func (s UserQueryContainer) MarshalJSON() ([]byte, error) { + type opt UserQueryContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalUserQueryContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalUserQueryContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + // NewUserQueryContainer returns a UserQueryContainer. 
func NewUserQueryContainer() *UserQueryContainer { r := &UserQueryContainer{ - Match: make(map[string]MatchQuery, 0), - Prefix: make(map[string]PrefixQuery, 0), - Range: make(map[string]RangeQuery, 0), - Term: make(map[string]TermQuery, 0), - Wildcard: make(map[string]WildcardQuery, 0), + AdditionalUserQueryContainerProperty: make(map[string]json.RawMessage), + Match: make(map[string]MatchQuery), + Prefix: make(map[string]PrefixQuery), + Range: make(map[string]RangeQuery), + Term: make(map[string]TermQuery), + Wildcard: make(map[string]WildcardQuery), } return r } + +// true + +type UserQueryContainerVariant interface { + UserQueryContainerCaster() *UserQueryContainer +} + +func (s *UserQueryContainer) UserQueryContainerCaster() *UserQueryContainer { + return s +} diff --git a/typedapi/types/userrealm.go b/typedapi/types/userrealm.go index d00c0a27a5..aa0018873c 100644 --- a/typedapi/types/userrealm.go +++ b/typedapi/types/userrealm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // UserRealm type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/security/get_token/types.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/security/get_token/types.ts#L50-L53 type UserRealm struct { Name string `json:"name"` Type string `json:"type"` @@ -80,3 +80,5 @@ func NewUserRealm() *UserRealm { return r } + +// false diff --git a/typedapi/types/validation.go b/typedapi/types/validation.go index d425e60bde..380a16fe0e 100644 --- a/typedapi/types/validation.go +++ b/typedapi/types/validation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -28,5 +28,9 @@ package types // IncludedInValidation // RegexValidation // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/connector/_types/Connector.ts#L50-L56 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/connector/_types/Connector.ts#L50-L56 type Validation any + +type ValidationVariant interface { + ValidationCaster() *Validation +} diff --git a/typedapi/types/validationloss.go b/typedapi/types/validationloss.go index d496231df5..e50555ca7c 100644 --- a/typedapi/types/validationloss.go +++ b/typedapi/types/validationloss.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ValidationLoss type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/DataframeAnalytics.ts#L570-L575 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/DataframeAnalytics.ts#L571-L576 type ValidationLoss struct { // FoldValues Validation loss values for every added decision tree during the forest // growing procedure. @@ -83,3 +83,5 @@ func NewValidationLoss() *ValidationLoss { return r } + +// false diff --git a/typedapi/types/valuecountaggregate.go b/typedapi/types/valuecountaggregate.go index 0198f08fb5..38fcf0a5d8 100644 --- a/typedapi/types/valuecountaggregate.go +++ b/typedapi/types/valuecountaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ValueCountAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L218-L222 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L231-L236 type ValueCountAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. 
A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewValueCountAggregate() *ValueCountAggregate { return r } + +// false diff --git a/typedapi/types/valuecountaggregation.go b/typedapi/types/valuecountaggregation.go index df5564d762..24e09d7176 100644 --- a/typedapi/types/valuecountaggregation.go +++ b/typedapi/types/valuecountaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ValueCountAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L422-L422 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L434-L434 type ValueCountAggregation struct { // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` @@ -95,3 +95,13 @@ func NewValueCountAggregation() *ValueCountAggregation { return r } + +// true + +type ValueCountAggregationVariant interface { + ValueCountAggregationCaster() *ValueCountAggregation +} + +func (s *ValueCountAggregation) ValueCountAggregationCaster() *ValueCountAggregation { + return s +} diff --git a/typedapi/types/variablewidthhistogramaggregate.go b/typedapi/types/variablewidthhistogramaggregate.go index d6bd2cad6d..911d0e47da 100644 --- a/typedapi/types/variablewidthhistogramaggregate.go +++ b/typedapi/types/variablewidthhistogramaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // VariableWidthHistogramAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L364-L366 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L402-L404 type VariableWidthHistogramAggregate struct { Buckets BucketsVariableWidthHistogramBucket `json:"buckets"` Meta Metadata `json:"meta,omitempty"` @@ -88,3 +88,5 @@ func NewVariableWidthHistogramAggregate() *VariableWidthHistogramAggregate { return r } + +// false diff --git a/typedapi/types/variablewidthhistogramaggregation.go b/typedapi/types/variablewidthhistogramaggregation.go index 5d4672503e..f4fda56af2 100644 --- a/typedapi/types/variablewidthhistogramaggregation.go +++ b/typedapi/types/variablewidthhistogramaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // VariableWidthHistogramAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/bucket.ts#L1022-L1043 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/bucket.ts#L1091-L1115 type VariableWidthHistogramAggregation struct { // Buckets The target number of buckets. 
Buckets *int `json:"buckets,omitempty"` @@ -132,3 +132,13 @@ func NewVariableWidthHistogramAggregation() *VariableWidthHistogramAggregation { return r } + +// true + +type VariableWidthHistogramAggregationVariant interface { + VariableWidthHistogramAggregationCaster() *VariableWidthHistogramAggregation +} + +func (s *VariableWidthHistogramAggregation) VariableWidthHistogramAggregationCaster() *VariableWidthHistogramAggregation { + return s +} diff --git a/typedapi/types/variablewidthhistogrambucket.go b/typedapi/types/variablewidthhistogrambucket.go index 2fd41d3abb..508c7906c0 100644 --- a/typedapi/types/variablewidthhistogrambucket.go +++ b/typedapi/types/variablewidthhistogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,7 +32,7 @@ import ( // VariableWidthHistogramBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L368-L375 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L406-L413 type VariableWidthHistogramBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -575,6 +575,13 @@ func (s *VariableWidthHistogramBucket) UnmarshalJSON(data []byte) error { } s.Aggregations[elems[1]] = o + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() if err := dec.Decode(&o); err != nil { @@ -708,8 +715,10 @@ func (s VariableWidthHistogramBucket) MarshalJSON() ([]byte, error) { // NewVariableWidthHistogramBucket returns a VariableWidthHistogramBucket. func NewVariableWidthHistogramBucket() *VariableWidthHistogramBucket { r := &VariableWidthHistogramBucket{ - Aggregations: make(map[string]Aggregate, 0), + Aggregations: make(map[string]Aggregate), } return r } + +// false diff --git a/typedapi/types/vector.go b/typedapi/types/vector.go index 69127de1ab..bbde0e8460 100644 --- a/typedapi/types/vector.go +++ b/typedapi/types/vector.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Vector type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L454-L458 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L464-L468 type Vector struct { Available bool `json:"available"` DenseVectorDimsAvgCount int `json:"dense_vector_dims_avg_count"` @@ -142,3 +142,5 @@ func NewVector() *Vector { return r } + +// false diff --git a/typedapi/types/verifyindex.go b/typedapi/types/verifyindex.go index 0fe83868ee..64dd225cd4 100644 --- a/typedapi/types/verifyindex.go +++ b/typedapi/types/verifyindex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // VerifyIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/indices/recovery/types.ts#L111-L116 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/indices/recovery/types.ts#L111-L116 type VerifyIndex struct { CheckIndexTime Duration `json:"check_index_time,omitempty"` CheckIndexTimeInMillis int64 `json:"check_index_time_in_millis"` @@ -84,3 +84,5 @@ func NewVerifyIndex() *VerifyIndex { return r } + +// false diff --git a/typedapi/types/versionproperty.go b/typedapi/types/versionproperty.go index 2df9b7e730..6eac2a4dc2 100644 --- a/typedapi/types/versionproperty.go +++ b/typedapi/types/versionproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // VersionProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L287-L289 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L305-L307 type VersionProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -41,11 +42,11 @@ type VersionProperty struct { Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. 
- Meta map[string]string `json:"meta,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *VersionProperty) UnmarshalJSON(data []byte) error { @@ -117,301 +118,313 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } 
s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | 
%w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -460,318 +473,318 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo 
case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + 
return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return 
err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -786,6 +799,11 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -800,16 +818,16 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { func (s VersionProperty) MarshalJSON() ([]byte, error) { type innerVersionProperty VersionProperty tmp := innerVersionProperty{ - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "version" @@ -820,10 +838,20 @@ func (s 
VersionProperty) MarshalJSON() ([]byte, error) { // NewVersionProperty returns a VersionProperty. func NewVersionProperty() *VersionProperty { r := &VersionProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type VersionPropertyVariant interface { + VersionPropertyCaster() *VersionProperty +} + +func (s *VersionProperty) VersionPropertyCaster() *VersionProperty { + return s +} diff --git a/typedapi/types/vertex.go b/typedapi/types/vertex.go index 7d56addecf..14e51c7e8f 100644 --- a/typedapi/types/vertex.go +++ b/typedapi/types/vertex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Vertex type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/graph/_types/Vertex.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/graph/_types/Vertex.ts#L23-L28 type Vertex struct { Depth int64 `json:"depth"` Field string `json:"field"` @@ -113,3 +113,5 @@ func NewVertex() *Vertex { return r } + +// false diff --git a/typedapi/types/vertexdefinition.go b/typedapi/types/vertexdefinition.go index 05473b2cef..7b891d1600 100644 --- a/typedapi/types/vertexdefinition.go +++ b/typedapi/types/vertexdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // VertexDefinition type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/graph/_types/Vertex.ts#L30-L59 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/graph/_types/Vertex.ts#L30-L59 type VertexDefinition struct { // Exclude Prevents the specified terms from being included in the results. Exclude []string `json:"exclude,omitempty"` @@ -138,3 +138,13 @@ func NewVertexDefinition() *VertexDefinition { return r } + +// true + +type VertexDefinitionVariant interface { + VertexDefinitionCaster() *VertexDefinition +} + +func (s *VertexDefinition) VertexDefinitionCaster() *VertexDefinition { + return s +} diff --git a/typedapi/types/vertexinclude.go b/typedapi/types/vertexinclude.go index 404be10657..2b7b308f3e 100644 --- a/typedapi/types/vertexinclude.go +++ b/typedapi/types/vertexinclude.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // VertexInclude type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/graph/_types/Vertex.ts#L61-L64 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/graph/_types/Vertex.ts#L61-L64 type VertexInclude struct { Boost Float64 `json:"boost"` Term string `json:"term"` @@ -91,3 +91,13 @@ func NewVertexInclude() *VertexInclude { return r } + +// true + +type VertexIncludeVariant interface { + VertexIncludeCaster() *VertexInclude +} + +func (s *VertexInclude) VertexIncludeCaster() *VertexInclude { + return s +} diff --git a/typedapi/types/vocabulary.go b/typedapi/types/vocabulary.go index 8c09f9f529..6d3a48741f 100644 --- a/typedapi/types/vocabulary.go +++ b/typedapi/types/vocabulary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // Vocabulary type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L233-L235 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L217-L219 type Vocabulary struct { Index string `json:"index"` } @@ -66,3 +66,13 @@ func NewVocabulary() *Vocabulary { return r } + +// true + +type VocabularyVariant interface { + VocabularyCaster() *Vocabulary +} + +func (s *Vocabulary) VocabularyCaster() *Vocabulary { + return s +} diff --git a/typedapi/types/void.go b/typedapi/types/void.go new file mode 100755 index 0000000000..8fcf1dd08f --- /dev/null +++ b/typedapi/types/void.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +type Void any diff --git a/typedapi/types/waitforactiveshards.go b/typedapi/types/waitforactiveshards.go index 5de6bda747..aacd089509 100644 --- a/typedapi/types/waitforactiveshards.go +++ b/typedapi/types/waitforactiveshards.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -25,5 +25,5 @@ package types // int // waitforactiveshardoptions.WaitForActiveShardOptions // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/common.ts#L142-L143 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/common.ts#L146-L147 type WaitForActiveShards any diff --git a/typedapi/types/waitforsnapshotaction.go b/typedapi/types/waitforsnapshotaction.go index 692c4dbe86..ecb7536670 100644 --- a/typedapi/types/waitforsnapshotaction.go +++ b/typedapi/types/waitforsnapshotaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // WaitForSnapshotAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ilm/_types/Phase.ts#L148-L150 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ilm/_types/Phase.ts#L145-L147 type WaitForSnapshotAction struct { Policy string `json:"policy"` } @@ -74,3 +74,13 @@ func NewWaitForSnapshotAction() *WaitForSnapshotAction { return r } + +// true + +type WaitForSnapshotActionVariant interface { + WaitForSnapshotActionCaster() *WaitForSnapshotAction +} + +func (s *WaitForSnapshotAction) WaitForSnapshotActionCaster() *WaitForSnapshotAction { + return s +} diff --git a/typedapi/types/warmerstats.go b/typedapi/types/warmerstats.go index e131cd8b5f..5e1900f563 100644 --- a/typedapi/types/warmerstats.go +++ b/typedapi/types/warmerstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // WarmerStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Stats.ts#L407-L412 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Stats.ts#L410-L415 type WarmerStats struct { Current int64 `json:"current"` Total int64 `json:"total"` @@ -105,3 +105,5 @@ func NewWarmerStats() *WarmerStats { return r } + +// false diff --git a/typedapi/types/watch.go b/typedapi/types/watch.go index 61a8460b39..45dd54cca8 100644 --- a/typedapi/types/watch.go +++ b/typedapi/types/watch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // Watch type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Watch.ts#L37-L47 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Watch.ts#L37-L47 type Watch struct { Actions map[string]WatcherAction `json:"actions"` Condition WatcherCondition `json:"condition"` @@ -114,8 +114,18 @@ func (s *Watch) UnmarshalJSON(data []byte) error { // NewWatch returns a Watch. func NewWatch() *Watch { r := &Watch{ - Actions: make(map[string]WatcherAction, 0), + Actions: make(map[string]WatcherAction), } return r } + +// true + +type WatchVariant interface { + WatchCaster() *Watch +} + +func (s *Watch) WatchCaster() *Watch { + return s +} diff --git a/typedapi/types/watcher.go b/typedapi/types/watcher.go index 573fbac10f..23e3ac4012 100644 --- a/typedapi/types/watcher.go +++ b/typedapi/types/watcher.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Watcher type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L460-L464 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L470-L474 type Watcher struct { Available bool `json:"available"` Count Counter `json:"count"` @@ -109,3 +109,5 @@ func NewWatcher() *Watcher { return r } + +// false diff --git a/typedapi/types/watcheraction.go b/typedapi/types/watcheraction.go index 68b17154a6..c9e9e0aaaa 100644 --- a/typedapi/types/watcheraction.go +++ b/typedapi/types/watcheraction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // WatcherAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Action.ts#L41-L60 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Action.ts#L35-L54 type WatcherAction struct { ActionType *actiontype.ActionType `json:"action_type,omitempty"` Condition *WatcherCondition `json:"condition,omitempty"` @@ -165,3 +165,13 @@ func NewWatcherAction() *WatcherAction { return r } + +// true + +type WatcherActionVariant interface { + WatcherActionCaster() *WatcherAction +} + +func (s *WatcherAction) WatcherActionCaster() *WatcherAction { + return s +} diff --git a/typedapi/types/watcheractions.go b/typedapi/types/watcheractions.go index 44de64cb73..940a4d2a31 100644 --- a/typedapi/types/watcheractions.go +++ b/typedapi/types/watcheractions.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // WatcherActions type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L396-L398 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L406-L408 type WatcherActions struct { Actions map[string]WatcherActionTotals `json:"actions"` } @@ -30,8 +30,10 @@ type WatcherActions struct { // NewWatcherActions returns a WatcherActions. func NewWatcherActions() *WatcherActions { r := &WatcherActions{ - Actions: make(map[string]WatcherActionTotals, 0), + Actions: make(map[string]WatcherActionTotals), } return r } + +// false diff --git a/typedapi/types/watcheractiontotals.go b/typedapi/types/watcheractiontotals.go index 5b38d59779..7d96fadb45 100644 --- a/typedapi/types/watcheractiontotals.go +++ b/typedapi/types/watcheractiontotals.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // WatcherActionTotals type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L412-L415 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L422-L425 type WatcherActionTotals struct { Total Duration `json:"total"` TotalTimeInMs int64 `json:"total_time_in_ms"` @@ -72,3 +72,5 @@ func NewWatcherActionTotals() *WatcherActionTotals { return r } + +// false diff --git a/typedapi/types/watchercondition.go b/typedapi/types/watchercondition.go index 89af3a0634..8385ca5d34 100644 --- a/typedapi/types/watchercondition.go +++ b/typedapi/types/watchercondition.go @@ -16,31 +16,75 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types import ( + "encoding/json" + "fmt" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionop" ) // WatcherCondition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Conditions.ts#L50-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Conditions.ts#L50-L62 type WatcherCondition struct { - Always *AlwaysCondition `json:"always,omitempty"` - ArrayCompare map[string]ArrayCompareCondition `json:"array_compare,omitempty"` - Compare map[string]map[conditionop.ConditionOp]FieldValue `json:"compare,omitempty"` - Never *NeverCondition `json:"never,omitempty"` - Script *ScriptCondition `json:"script,omitempty"` + AdditionalWatcherConditionProperty map[string]json.RawMessage `json:"-"` + Always *AlwaysCondition `json:"always,omitempty"` + ArrayCompare map[string]ArrayCompareCondition `json:"array_compare,omitempty"` + Compare map[string]map[conditionop.ConditionOp]FieldValue `json:"compare,omitempty"` + Never *NeverCondition `json:"never,omitempty"` + Script *ScriptCondition `json:"script,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s WatcherCondition) MarshalJSON() ([]byte, error) { + type opt WatcherCondition + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalWatcherConditionProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalWatcherConditionProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewWatcherCondition returns a WatcherCondition. 
func NewWatcherCondition() *WatcherCondition { r := &WatcherCondition{ - ArrayCompare: make(map[string]ArrayCompareCondition, 0), - Compare: make(map[string]map[conditionop.ConditionOp]FieldValue, 0), + AdditionalWatcherConditionProperty: make(map[string]json.RawMessage), + ArrayCompare: make(map[string]ArrayCompareCondition), + Compare: make(map[string]map[conditionop.ConditionOp]FieldValue), } return r } + +// true + +type WatcherConditionVariant interface { + WatcherConditionCaster() *WatcherCondition +} + +func (s *WatcherCondition) WatcherConditionCaster() *WatcherCondition { + return s +} diff --git a/typedapi/types/watcherinput.go b/typedapi/types/watcherinput.go index eeeb62d01f..b2b3ef7e95 100644 --- a/typedapi/types/watcherinput.go +++ b/typedapi/types/watcherinput.go @@ -16,29 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types import ( "encoding/json" + "fmt" ) // WatcherInput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Input.ts#L90-L98 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Input.ts#L90-L98 type WatcherInput struct { - Chain *ChainInput `json:"chain,omitempty"` - Http *HttpInput `json:"http,omitempty"` - Search *SearchInput `json:"search,omitempty"` - Simple map[string]json.RawMessage `json:"simple,omitempty"` + AdditionalWatcherInputProperty map[string]json.RawMessage `json:"-"` + Chain *ChainInput `json:"chain,omitempty"` + Http *HttpInput `json:"http,omitempty"` + Search *SearchInput `json:"search,omitempty"` + Simple map[string]json.RawMessage `json:"simple,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s WatcherInput) MarshalJSON() ([]byte, error) { + type opt WatcherInput + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalWatcherInputProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalWatcherInputProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil } // NewWatcherInput returns a WatcherInput. 
func NewWatcherInput() *WatcherInput { r := &WatcherInput{ - Simple: make(map[string]json.RawMessage, 0), + AdditionalWatcherInputProperty: make(map[string]json.RawMessage), + Simple: make(map[string]json.RawMessage), } return r } + +// true + +type WatcherInputVariant interface { + WatcherInputCaster() *WatcherInput +} + +func (s *WatcherInput) WatcherInputCaster() *WatcherInput { + return s +} diff --git a/typedapi/types/watchernodestats.go b/typedapi/types/watchernodestats.go index a3b57a75f0..4de45513fe 100644 --- a/typedapi/types/watchernodestats.go +++ b/typedapi/types/watchernodestats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,14 +33,35 @@ import ( // WatcherNodeStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/stats/types.ts#L33-L40 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/stats/types.ts#L33-L61 type WatcherNodeStats struct { - CurrentWatches []WatchRecordStats `json:"current_watches,omitempty"` - ExecutionThreadPool ExecutionThreadPool `json:"execution_thread_pool"` - NodeId string `json:"node_id"` - QueuedWatches []WatchRecordQueuedStats `json:"queued_watches,omitempty"` - WatchCount int64 `json:"watch_count"` - WatcherState watcherstate.WatcherState `json:"watcher_state"` + // CurrentWatches The current executing watches metric gives insight into the watches that are + // currently being executed by Watcher. + // Additional information is shared per watch that is currently executing. 
+ // This information includes the `watch_id`, the time its execution started and + // its current execution phase. + // To include this metric, the `metric` option should be set to + // `current_watches` or `_all`. + // In addition you can also specify the `emit_stacktraces=true` parameter, which + // adds stack traces for each watch that is being run. + // These stack traces can give you more insight into an execution of a watch. + CurrentWatches []WatchRecordStats `json:"current_watches,omitempty"` + ExecutionThreadPool ExecutionThreadPool `json:"execution_thread_pool"` + NodeId string `json:"node_id"` + // QueuedWatches Watcher moderates the execution of watches such that their execution won't + // put too much pressure on the node and its resources. + // If too many watches trigger concurrently and there isn't enough capacity to + // run them all, some of the watches are queued, waiting for the current running + // watches to finish. + // The queued watches metric gives insight on these queued watches. + // + // To include this metric, the `metric` option should include `queued_watches` + // or `_all`. + QueuedWatches []WatchRecordQueuedStats `json:"queued_watches,omitempty"` + // WatchCount The number of watches currently registered. + WatchCount int64 `json:"watch_count"` + // WatcherState The current state of Watcher. + WatcherState watcherstate.WatcherState `json:"watcher_state"` } func (s *WatcherNodeStats) UnmarshalJSON(data []byte) error { @@ -109,3 +130,5 @@ func NewWatcherNodeStats() *WatcherNodeStats { return r } + +// false diff --git a/typedapi/types/watcherstatusactions.go b/typedapi/types/watcherstatusactions.go index 74e0ee54b1..22ee284e6e 100644 --- a/typedapi/types/watcherstatusactions.go +++ b/typedapi/types/watcherstatusactions.go @@ -16,11 +16,15 @@ // under the License. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // WatcherStatusActions type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Action.ts#L62-L62 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Action.ts#L56-L56 type WatcherStatusActions map[string]ActionStatus + +type WatcherStatusActionsVariant interface { + WatcherStatusActionsCaster() *WatcherStatusActions +} diff --git a/typedapi/types/watcherwatch.go b/typedapi/types/watcherwatch.go index f03d52abfa..a067530405 100644 --- a/typedapi/types/watcherwatch.go +++ b/typedapi/types/watcherwatch.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // WatcherWatch type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L400-L405 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L410-L415 type WatcherWatch struct { Action map[string]Counter `json:"action,omitempty"` Condition map[string]Counter `json:"condition,omitempty"` @@ -33,10 +33,12 @@ type WatcherWatch struct { // NewWatcherWatch returns a WatcherWatch. 
func NewWatcherWatch() *WatcherWatch { r := &WatcherWatch{ - Action: make(map[string]Counter, 0), - Condition: make(map[string]Counter, 0), - Input: make(map[string]Counter, 0), + Action: make(map[string]Counter), + Condition: make(map[string]Counter), + Input: make(map[string]Counter), } return r } + +// false diff --git a/typedapi/types/watcherwatchtrigger.go b/typedapi/types/watcherwatchtrigger.go index 2245b191a8..6d3841e948 100644 --- a/typedapi/types/watcherwatchtrigger.go +++ b/typedapi/types/watcherwatchtrigger.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // WatcherWatchTrigger type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L407-L410 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L417-L420 type WatcherWatchTrigger struct { All_ Counter `json:"_all"` Schedule *WatcherWatchTriggerSchedule `json:"schedule,omitempty"` @@ -34,3 +34,5 @@ func NewWatcherWatchTrigger() *WatcherWatchTrigger { return r } + +// false diff --git a/typedapi/types/watcherwatchtriggerschedule.go b/typedapi/types/watcherwatchtriggerschedule.go index 4bd2e61262..b0335f5e0f 100644 --- a/typedapi/types/watcherwatchtriggerschedule.go +++ b/typedapi/types/watcherwatchtriggerschedule.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // WatcherWatchTriggerSchedule type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L466-L469 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L476-L479 type WatcherWatchTriggerSchedule struct { Active int64 `json:"active"` All_ Counter `json:"_all"` @@ -105,3 +105,5 @@ func NewWatcherWatchTriggerSchedule() *WatcherWatchTriggerSchedule { return r } + +// false diff --git a/typedapi/types/watchrecord.go b/typedapi/types/watchrecord.go index d8bd03b976..09d3e1ee9a 100644 --- a/typedapi/types/watchrecord.go +++ b/typedapi/types/watchrecord.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // WatchRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/execute_watch/types.ts#L27-L39 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/execute_watch/types.ts#L27-L39 type WatchRecord struct { Condition WatcherCondition `json:"condition"` Input WatcherInput `json:"input"` @@ -136,3 +136,5 @@ func NewWatchRecord() *WatchRecord { return r } + +// false diff --git a/typedapi/types/watchrecordqueuedstats.go b/typedapi/types/watchrecordqueuedstats.go index 57e240ba34..981b1f309f 100644 --- a/typedapi/types/watchrecordqueuedstats.go +++ b/typedapi/types/watchrecordqueuedstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,8 +30,10 @@ import ( // WatchRecordQueuedStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/stats/types.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/stats/types.ts#L71-L77 type WatchRecordQueuedStats struct { + // ExecutionTime The time the watch was run. + // This is just before the input is being run. ExecutionTime DateTime `json:"execution_time"` } @@ -66,3 +68,5 @@ func NewWatchRecordQueuedStats() *WatchRecordQueuedStats { return r } + +// false diff --git a/typedapi/types/watchrecordstats.go b/typedapi/types/watchrecordstats.go index 6d65db5efd..7d553bd8c3 100644 --- a/typedapi/types/watchrecordstats.go +++ b/typedapi/types/watchrecordstats.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -32,14 +32,19 @@ import ( // WatchRecordStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/stats/types.ts#L54-L60 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/stats/types.ts#L79-L94 type WatchRecordStats struct { - ExecutedActions []string `json:"executed_actions,omitempty"` - ExecutionPhase executionphase.ExecutionPhase `json:"execution_phase"` - ExecutionTime DateTime `json:"execution_time"` - TriggeredTime DateTime `json:"triggered_time"` - WatchId string `json:"watch_id"` - WatchRecordId string `json:"watch_record_id"` + ExecutedActions []string `json:"executed_actions,omitempty"` + // ExecutionPhase The current watch execution phase. + ExecutionPhase executionphase.ExecutionPhase `json:"execution_phase"` + // ExecutionTime The time the watch was run. + // This is just before the input is being run. + ExecutionTime DateTime `json:"execution_time"` + // TriggeredTime The time the watch was triggered by the trigger engine. + TriggeredTime DateTime `json:"triggered_time"` + WatchId string `json:"watch_id"` + // WatchRecordId The watch record identifier. + WatchRecordId string `json:"watch_record_id"` } func (s *WatchRecordStats) UnmarshalJSON(data []byte) error { @@ -98,3 +103,5 @@ func NewWatchRecordStats() *WatchRecordStats { return r } + +// false diff --git a/typedapi/types/watchstatus.go b/typedapi/types/watchstatus.go index 0aa95eb664..fbefded7cc 100644 --- a/typedapi/types/watchstatus.go +++ b/typedapi/types/watchstatus.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // WatchStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Watch.ts#L49-L56 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Watch.ts#L49-L56 type WatchStatus struct { Actions WatcherStatusActions `json:"actions"` ExecutionState *string `json:"execution_state,omitempty"` @@ -104,3 +104,13 @@ func NewWatchStatus() *WatchStatus { return r } + +// true + +type WatchStatusVariant interface { + WatchStatusCaster() *WatchStatus +} + +func (s *WatchStatus) WatchStatusCaster() *WatchStatus { + return s +} diff --git a/typedapi/types/watsonxservicesettings.go b/typedapi/types/watsonxservicesettings.go new file mode 100644 index 0000000000..1349319d05 --- /dev/null +++ b/typedapi/types/watsonxservicesettings.go @@ -0,0 +1,165 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// WatsonxServiceSettings type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/inference/put_watsonx/PutWatsonxRequest.ts#L80-L117
+type WatsonxServiceSettings struct {
+ // ApiKey A valid API key of your Watsonx account.
+ // You can find your Watsonx API keys or you can create a new one on the API
+ // keys page.
+ //
+ // IMPORTANT: You need to provide the API key only once, during the inference
+ // model creation.
+ // The get inference endpoint API does not retrieve your API key.
+ // After creating the inference model, you cannot change the associated API key.
+ // If you want to use a different API key, delete the inference model and
+ // recreate it with the same name and the updated API key.
+ ApiKey string `json:"api_key"`
+ // ApiVersion A version parameter that takes a version date in the format of `YYYY-MM-DD`.
+ // For the active version data parameters, refer to the Watsonx documentation.
+ ApiVersion string `json:"api_version"`
+ // ModelId The name of the model to use for the inference task.
+ // Refer to the IBM Embedding Models section in the Watsonx documentation for
+ // the list of available text embedding models.
+ ModelId string `json:"model_id"`
+ // ProjectId The identifier of the IBM Cloud project to use for the inference task.
+ ProjectId string `json:"project_id"`
+ // RateLimit This setting helps to minimize the number of rate limit errors returned from
+ // Watsonx.
+ // By default, the `watsonxai` service sets the number of requests allowed per
+ // minute to 120.
+ RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` + // Url The URL of the inference endpoint that you created on Watsonx. + Url string `json:"url"` +} + +func (s *WatsonxServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "api_version": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiVersion", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiVersion = o + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "project_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ProjectId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProjectId = o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Url", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = o + + } + } + return nil +} + +// NewWatsonxServiceSettings returns a WatsonxServiceSettings. 
+func NewWatsonxServiceSettings() *WatsonxServiceSettings { + r := &WatsonxServiceSettings{} + + return r +} + +// true + +type WatsonxServiceSettingsVariant interface { + WatsonxServiceSettingsCaster() *WatsonxServiceSettings +} + +func (s *WatsonxServiceSettings) WatsonxServiceSettingsCaster() *WatsonxServiceSettings { + return s +} diff --git a/typedapi/types/aggregation.go b/typedapi/types/web.go similarity index 68% rename from typedapi/types/aggregation.go rename to typedapi/types/web.go index f9a632fe02..f5c9900cbc 100644 --- a/typedapi/types/aggregation.go +++ b/typedapi/types/web.go @@ -16,19 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types -// Aggregation type. +// Web type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregation.ts#L20-L20 -type Aggregation struct { +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ingest/_types/Database.ts#L61-L61 +type Web struct { } -// NewAggregation returns a Aggregation. -func NewAggregation() *Aggregation { - r := &Aggregation{} +// NewWeb returns a Web. +func NewWeb() *Web { + r := &Web{} return r } + +// false diff --git a/typedapi/types/webhookaction.go b/typedapi/types/webhookaction.go index 6b9104fa0f..77b9000e4f 100644 --- a/typedapi/types/webhookaction.go +++ b/typedapi/types/webhookaction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -34,7 +34,7 @@ import ( // WebhookAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L293-L293 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L293-L293 type WebhookAction struct { Auth *HttpInputAuthentication `json:"auth,omitempty"` Body *string `json:"body,omitempty"` @@ -166,9 +166,19 @@ func (s *WebhookAction) UnmarshalJSON(data []byte) error { // NewWebhookAction returns a WebhookAction. func NewWebhookAction() *WebhookAction { r := &WebhookAction{ - Headers: make(map[string]string, 0), - Params: make(map[string]string, 0), + Headers: make(map[string]string), + Params: make(map[string]string), } return r } + +// true + +type WebhookActionVariant interface { + WebhookActionCaster() *WebhookAction +} + +func (s *WebhookAction) WebhookActionCaster() *WebhookAction { + return s +} diff --git a/typedapi/types/webhookresult.go b/typedapi/types/webhookresult.go index 5dc621d699..97514e1b0a 100644 --- a/typedapi/types/webhookresult.go +++ b/typedapi/types/webhookresult.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // WebhookResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/_types/Actions.ts#L295-L298 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/_types/Actions.ts#L295-L298 type WebhookResult struct { Request HttpInputRequestResult `json:"request"` Response *HttpInputResponseResult `json:"response,omitempty"` @@ -34,3 +34,5 @@ func NewWebhookResult() *WebhookResult { return r } + +// false diff --git a/typedapi/types/weightedaverageaggregation.go b/typedapi/types/weightedaverageaggregation.go index 36dc2aef83..ca67507610 100644 --- a/typedapi/types/weightedaverageaggregation.go +++ b/typedapi/types/weightedaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -33,7 +33,7 @@ import ( // WeightedAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L437-L451 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L449-L463 type WeightedAverageAggregation struct { // Format A numeric response formatter. 
Format *string `json:"format,omitempty"` @@ -97,3 +97,13 @@ func NewWeightedAverageAggregation() *WeightedAverageAggregation { return r } + +// true + +type WeightedAverageAggregationVariant interface { + WeightedAverageAggregationCaster() *WeightedAverageAggregation +} + +func (s *WeightedAverageAggregation) WeightedAverageAggregationCaster() *WeightedAverageAggregation { + return s +} diff --git a/typedapi/types/weightedaveragevalue.go b/typedapi/types/weightedaveragevalue.go index d13a177b8b..7374b5fb22 100644 --- a/typedapi/types/weightedaveragevalue.go +++ b/typedapi/types/weightedaveragevalue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // WeightedAverageValue type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/metric.ts#L453-L463 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/metric.ts#L465-L475 type WeightedAverageValue struct { // Field The field from which to extract the values or weights. 
Field *string `json:"field,omitempty"` @@ -92,3 +92,13 @@ func NewWeightedAverageValue() *WeightedAverageValue { return r } + +// true + +type WeightedAverageValueVariant interface { + WeightedAverageValueCaster() *WeightedAverageValue +} + +func (s *WeightedAverageValue) WeightedAverageValueCaster() *WeightedAverageValue { + return s +} diff --git a/typedapi/types/weightedavgaggregate.go b/typedapi/types/weightedavgaggregate.go index f22e6df9b7..89d4dd5069 100644 --- a/typedapi/types/weightedavgaggregate.go +++ b/typedapi/types/weightedavgaggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // WeightedAvgAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/aggregations/Aggregate.ts#L212-L216 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/aggregations/Aggregate.ts#L224-L229 type WeightedAvgAggregate struct { Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to @@ -89,3 +89,5 @@ func NewWeightedAvgAggregate() *WeightedAvgAggregate { return r } + +// false diff --git a/typedapi/types/weightedtokensquery.go b/typedapi/types/weightedtokensquery.go index bc109c7ce7..56abb67c27 100644 --- a/typedapi/types/weightedtokensquery.go +++ b/typedapi/types/weightedtokensquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // WeightedTokensQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/WeightedTokensQuery.ts#L27-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/WeightedTokensQuery.ts#L25-L33 type WeightedTokensQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -110,8 +110,18 @@ func (s *WeightedTokensQuery) UnmarshalJSON(data []byte) error { // NewWeightedTokensQuery returns a WeightedTokensQuery. func NewWeightedTokensQuery() *WeightedTokensQuery { r := &WeightedTokensQuery{ - Tokens: make(map[string]float32, 0), + Tokens: make(map[string]float32), } return r } + +// true + +type WeightedTokensQueryVariant interface { + WeightedTokensQueryCaster() *WeightedTokensQuery +} + +func (s *WeightedTokensQuery) WeightedTokensQueryCaster() *WeightedTokensQuery { + return s +} diff --git a/typedapi/types/weights.go b/typedapi/types/weights.go index 8b0a704153..c516a837a8 100644 --- a/typedapi/types/weights.go +++ b/typedapi/types/weights.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // Weights type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/put_trained_model/types.ts#L108-L110 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/put_trained_model/types.ts#L108-L110 type Weights struct { Weights Float64 `json:"weights"` } @@ -78,3 +78,13 @@ func NewWeights() *Weights { return r } + +// true + +type WeightsVariant interface { + WeightsCaster() *Weights +} + +func (s *Weights) WeightsCaster() *Weights { + return s +} diff --git a/typedapi/types/whitespaceanalyzer.go b/typedapi/types/whitespaceanalyzer.go index a738173d49..fe5b744f59 100644 --- a/typedapi/types/whitespaceanalyzer.go +++ b/typedapi/types/whitespaceanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -30,7 +30,7 @@ import ( // WhitespaceAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/analyzers.ts#L108-L111 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/analyzers.ts#L345-L348 type WhitespaceAnalyzer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` @@ -85,3 +85,13 @@ func NewWhitespaceAnalyzer() *WhitespaceAnalyzer { return r } + +// true + +type WhitespaceAnalyzerVariant interface { + WhitespaceAnalyzerCaster() *WhitespaceAnalyzer +} + +func (s *WhitespaceAnalyzer) WhitespaceAnalyzerCaster() *WhitespaceAnalyzer { + return s +} diff --git a/typedapi/types/whitespacetokenizer.go b/typedapi/types/whitespacetokenizer.go index 4adbc15296..c1a5e5e09c 100644 --- a/typedapi/types/whitespacetokenizer.go +++ b/typedapi/types/whitespacetokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // WhitespaceTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/tokenizers.ts#L115-L118 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/tokenizers.ts#L135-L138 type WhitespaceTokenizer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` Type string `json:"type,omitempty"` @@ -104,3 +104,13 @@ func NewWhitespaceTokenizer() *WhitespaceTokenizer { return r } + +// true + +type WhitespaceTokenizerVariant interface { + WhitespaceTokenizerCaster() *WhitespaceTokenizer +} + +func (s *WhitespaceTokenizer) WhitespaceTokenizerCaster() *WhitespaceTokenizer { + return s +} diff --git a/typedapi/types/wildcardproperty.go b/typedapi/types/wildcardproperty.go index 0411d3d605..fa1e35df9c 100644 --- a/typedapi/types/wildcardproperty.go +++ b/typedapi/types/wildcardproperty.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -29,11 +29,12 @@ import ( "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/syntheticsourcekeepenum" ) // WildcardProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/mapping/core.ts#L291-L298 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/mapping/core.ts#L309-L316 type WildcardProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -41,12 +42,12 @@ type WildcardProperty struct { Fields map[string]Property `json:"fields,omitempty"` IgnoreAbove *int `json:"ignore_above,omitempty"` // Meta Metadata about the field. - Meta map[string]string `json:"meta,omitempty"` - NullValue *string `json:"null_value,omitempty"` - Properties map[string]Property `json:"properties,omitempty"` - Similarity *string `json:"similarity,omitempty"` - Store *bool `json:"store,omitempty"` - Type string `json:"type,omitempty"` + Meta map[string]string `json:"meta,omitempty"` + NullValue *string `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` } func (s *WildcardProperty) UnmarshalJSON(data []byte) error { @@ -118,301 +119,313 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "text": oo := NewTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != 
nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "histogram": oo := NewHistogramProperty() if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil 
{ - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Fields | %w", err) } s.Fields[key] = oo default: oo := new(Property) if err := 
localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Fields[key] = oo } @@ -473,318 +486,318 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { case "binary": oo := NewBinaryProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "boolean": oo := NewBooleanProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "{dynamic_type}": oo := NewDynamicProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "join": oo := NewJoinProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "keyword": oo := NewKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "match_only_text": oo := NewMatchOnlyTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "percolator": oo := NewPercolatorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_feature": oo := NewRankFeatureProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "rank_features": oo := NewRankFeaturesProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "search_as_you_type": oo := NewSearchAsYouTypeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "text": oo := NewTextProperty() 
if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "version": oo := NewVersionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "wildcard": oo := NewWildcardProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_nanos": oo := NewDateNanosProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date": oo := NewDateProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "aggregate_metric_double": oo := NewAggregateMetricDoubleProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "dense_vector": oo := NewDenseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "flattened": oo := NewFlattenedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "nested": oo := NewNestedProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "object": oo := NewObjectProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "semantic_text": oo := NewSemanticTextProperty() if err := localDec.Decode(&oo); err != nil { - return err + return 
fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "sparse_vector": oo := NewSparseVectorProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "completion": oo := NewCompletionProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "constant_keyword": oo := NewConstantKeywordProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "alias": oo := NewFieldAliasProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "histogram": oo := NewHistogramProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip": oo := NewIpProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "murmur3": oo := NewMurmur3HashProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "token_count": oo := NewTokenCountProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_point": oo := NewGeoPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "geo_shape": oo := NewGeoShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case 
"point": oo := NewPointProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "shape": oo := NewShapeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "byte": oo := NewByteNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double": oo := NewDoubleNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float": oo := NewFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "half_float": oo := NewHalfFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer": oo := NewIntegerNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long": oo := NewLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "scaled_float": oo := NewScaledFloatNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "short": oo := NewShortNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "unsigned_long": oo := NewUnsignedLongNumberProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "date_range": oo := NewDateRangeProperty() if err := localDec.Decode(&oo); err 
!= nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "double_range": oo := NewDoubleRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "float_range": oo := NewFloatRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "integer_range": oo := NewIntegerRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "ip_range": oo := NewIpRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "long_range": oo := NewLongRangeProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo case "icu_collation_keyword": oo := NewIcuCollationProperty() if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("Properties | %w", err) } s.Properties[key] = oo default: oo := new(Property) if err := localDec.Decode(&oo); err != nil { - return err + return fmt.Errorf("new(Property) | %w", err) } s.Properties[key] = oo } } - case "similarity": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Similarity", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Similarity = &o - case "store": var tmp any dec.Decode(&tmp) @@ -799,6 +812,11 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { s.Store = &v } + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + case "type": if err := dec.Decode(&s.Type); err != nil { return fmt.Errorf("%s | %w", "Type", err) @@ -813,17 
+831,17 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { func (s WildcardProperty) MarshalJSON() ([]byte, error) { type innerWildcardProperty WildcardProperty tmp := innerWildcardProperty{ - CopyTo: s.CopyTo, - DocValues: s.DocValues, - Dynamic: s.Dynamic, - Fields: s.Fields, - IgnoreAbove: s.IgnoreAbove, - Meta: s.Meta, - NullValue: s.NullValue, - Properties: s.Properties, - Similarity: s.Similarity, - Store: s.Store, - Type: s.Type, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, } tmp.Type = "wildcard" @@ -834,10 +852,20 @@ func (s WildcardProperty) MarshalJSON() ([]byte, error) { // NewWildcardProperty returns a WildcardProperty. func NewWildcardProperty() *WildcardProperty { r := &WildcardProperty{ - Fields: make(map[string]Property, 0), - Meta: make(map[string]string, 0), - Properties: make(map[string]Property, 0), + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), } return r } + +// true + +type WildcardPropertyVariant interface { + WildcardPropertyCaster() *WildcardProperty +} + +func (s *WildcardProperty) WildcardPropertyCaster() *WildcardProperty { + return s +} diff --git a/typedapi/types/wildcardquery.go b/typedapi/types/wildcardquery.go index 731ccd044d..01d9e24a60 100644 --- a/typedapi/types/wildcardquery.go +++ b/typedapi/types/wildcardquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // WildcardQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/term.ts#L273-L290 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/term.ts#L305-L325 type WildcardQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. @@ -163,3 +163,13 @@ func NewWildcardQuery() *WildcardQuery { return r } + +// true + +type WildcardQueryVariant interface { + WildcardQueryCaster() *WildcardQuery +} + +func (s *WildcardQuery) WildcardQueryCaster() *WildcardQuery { + return s +} diff --git a/typedapi/types/wktgeobounds.go b/typedapi/types/wktgeobounds.go index 76c6219782..9f5d81bcc6 100644 --- a/typedapi/types/wktgeobounds.go +++ b/typedapi/types/wktgeobounds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // WktGeoBounds type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/Geo.ts#L150-L152 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/Geo.ts#L150-L152 type WktGeoBounds struct { Wkt string `json:"wkt"` } @@ -74,3 +74,13 @@ func NewWktGeoBounds() *WktGeoBounds { return r } + +// true + +type WktGeoBoundsVariant interface { + WktGeoBoundsCaster() *WktGeoBounds +} + +func (s *WktGeoBounds) WktGeoBoundsCaster() *WktGeoBounds { + return s +} diff --git a/typedapi/types/worddelimitergraphtokenfilter.go b/typedapi/types/worddelimitergraphtokenfilter.go index 3b90236f03..061c00b9a9 100644 --- a/typedapi/types/worddelimitergraphtokenfilter.go +++ b/typedapi/types/worddelimitergraphtokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // WordDelimiterGraphTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L151-L168 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L150-L167 type WordDelimiterGraphTokenFilter struct { AdjustOffsets *bool `json:"adjust_offsets,omitempty"` CatenateAll *bool `json:"catenate_all,omitempty"` @@ -295,3 +295,13 @@ func NewWordDelimiterGraphTokenFilter() *WordDelimiterGraphTokenFilter { return r } + +// true + +type WordDelimiterGraphTokenFilterVariant interface { + WordDelimiterGraphTokenFilterCaster() *WordDelimiterGraphTokenFilter +} + +func (s *WordDelimiterGraphTokenFilter) WordDelimiterGraphTokenFilterCaster() *WordDelimiterGraphTokenFilter { + return s +} diff --git a/typedapi/types/worddelimitertokenfilter.go b/typedapi/types/worddelimitertokenfilter.go index 78f2c65111..71d62b0705 100644 --- a/typedapi/types/worddelimitertokenfilter.go +++ b/typedapi/types/worddelimitertokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // WordDelimiterTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/analysis/token_filters.ts#L134-L149 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/analysis/token_filters.ts#L133-L148 type WordDelimiterTokenFilter struct { CatenateAll *bool `json:"catenate_all,omitempty"` CatenateNumbers *bool `json:"catenate_numbers,omitempty"` @@ -263,3 +263,13 @@ func NewWordDelimiterTokenFilter() *WordDelimiterTokenFilter { return r } + +// true + +type WordDelimiterTokenFilterVariant interface { + WordDelimiterTokenFilterCaster() *WordDelimiterTokenFilter +} + +func (s *WordDelimiterTokenFilter) WordDelimiterTokenFilterCaster() *WordDelimiterTokenFilter { + return s +} diff --git a/typedapi/types/wrapperquery.go b/typedapi/types/wrapperquery.go index 7ad8960135..bc5a9c7913 100644 --- a/typedapi/types/wrapperquery.go +++ b/typedapi/types/wrapperquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // WrapperQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_types/query_dsl/abstractions.ts#L501-L507 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/_types/query_dsl/abstractions.ts#L508-L517 type WrapperQuery struct { // Boost Floating point number used to decrease or increase the relevance scores of // the query. 
@@ -111,3 +111,13 @@ func NewWrapperQuery() *WrapperQuery { return r } + +// true + +type WrapperQueryVariant interface { + WrapperQueryCaster() *WrapperQuery +} + +func (s *WrapperQuery) WrapperQueryCaster() *WrapperQuery { + return s +} diff --git a/typedapi/types/writeoperation.go b/typedapi/types/writeoperation.go deleted file mode 100644 index c1284c7caa..0000000000 --- a/typedapi/types/writeoperation.go +++ /dev/null @@ -1,171 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 - -package types - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" -) - -// WriteOperation type. -// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/_global/bulk/types.ts#L109-L128 -type WriteOperation struct { - // DynamicTemplates A map from the full name of fields to the name of dynamic templates. - // Defaults to an empty map. 
- // If a name matches a dynamic template, then that template will be applied - // regardless of other match predicates defined in the template. - // If a field is already defined in the mapping, then this parameter won’t be - // used. - DynamicTemplates map[string]string `json:"dynamic_templates,omitempty"` - // Id_ The document ID. - Id_ *string `json:"_id,omitempty"` - IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` - IfSeqNo *int64 `json:"if_seq_no,omitempty"` - // Index_ Name of the index or index alias to perform the action on. - Index_ *string `json:"_index,omitempty"` - // Pipeline ID of the pipeline to use to preprocess incoming documents. - // If the index has a default ingest pipeline specified, then setting the value - // to `_none` disables the default ingest pipeline for this request. - // If a final pipeline is configured it will always run, regardless of the value - // of this parameter. - Pipeline *string `json:"pipeline,omitempty"` - // RequireAlias If `true`, the request’s actions must target an index alias. - RequireAlias *bool `json:"require_alias,omitempty"` - // Routing Custom value used to route operations to a specific shard. 
- Routing *string `json:"routing,omitempty"` - Version *int64 `json:"version,omitempty"` - VersionType *versiontype.VersionType `json:"version_type,omitempty"` -} - -func (s *WriteOperation) UnmarshalJSON(data []byte) error { - - dec := json.NewDecoder(bytes.NewReader(data)) - - for { - t, err := dec.Token() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return err - } - - switch t { - - case "dynamic_templates": - if s.DynamicTemplates == nil { - s.DynamicTemplates = make(map[string]string, 0) - } - if err := dec.Decode(&s.DynamicTemplates); err != nil { - return fmt.Errorf("%s | %w", "DynamicTemplates", err) - } - - case "_id": - if err := dec.Decode(&s.Id_); err != nil { - return fmt.Errorf("%s | %w", "Id_", err) - } - - case "if_primary_term": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("%s | %w", "IfPrimaryTerm", err) - } - s.IfPrimaryTerm = &value - case float64: - f := int64(v) - s.IfPrimaryTerm = &f - } - - case "if_seq_no": - if err := dec.Decode(&s.IfSeqNo); err != nil { - return fmt.Errorf("%s | %w", "IfSeqNo", err) - } - - case "_index": - if err := dec.Decode(&s.Index_); err != nil { - return fmt.Errorf("%s | %w", "Index_", err) - } - - case "pipeline": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { - return fmt.Errorf("%s | %w", "Pipeline", err) - } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) - } - s.Pipeline = &o - - case "require_alias": - var tmp any - dec.Decode(&tmp) - switch v := tmp.(type) { - case string: - value, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("%s | %w", "RequireAlias", err) - } - s.RequireAlias = &value - case bool: - s.RequireAlias = &v - } - - case "routing": - if err := dec.Decode(&s.Routing); err != nil { - return fmt.Errorf("%s | %w", "Routing", err) - } - - case "version": - if err := dec.Decode(&s.Version); 
err != nil { - return fmt.Errorf("%s | %w", "Version", err) - } - - case "version_type": - if err := dec.Decode(&s.VersionType); err != nil { - return fmt.Errorf("%s | %w", "VersionType", err) - } - - } - } - return nil -} - -// NewWriteOperation returns a WriteOperation. -func NewWriteOperation() *WriteOperation { - r := &WriteOperation{ - DynamicTemplates: make(map[string]string, 0), - } - - return r -} diff --git a/typedapi/types/writesummaryinfo.go b/typedapi/types/writesummaryinfo.go new file mode 100644 index 0000000000..7bd87af2cc --- /dev/null +++ b/typedapi/types/writesummaryinfo.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WriteSummaryInfo type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L162-L191 +type WriteSummaryInfo struct { + // Count The number of write operations performed in the test. + Count int `json:"count"` + // TotalElapsed The total elapsed time spent on writing blobs in the test. + TotalElapsed Duration `json:"total_elapsed"` + // TotalElapsedNanos The total elapsed time spent on writing blobs in the test, in nanoseconds. + TotalElapsedNanos int64 `json:"total_elapsed_nanos"` + // TotalSize The total size of all the blobs written in the test. + TotalSize ByteSize `json:"total_size"` + // TotalSizeBytes The total size of all the blobs written in the test, in bytes. + TotalSizeBytes int64 `json:"total_size_bytes"` + // TotalThrottled The total time spent waiting due to the `max_snapshot_bytes_per_sec` + // throttle. + TotalThrottled Duration `json:"total_throttled"` + // TotalThrottledNanos The total time spent waiting due to the `max_snapshot_bytes_per_sec` + // throttle, in nanoseconds. 
+ TotalThrottledNanos int64 `json:"total_throttled_nanos"` +} + +func (s *WriteSummaryInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "total_elapsed": + if err := dec.Decode(&s.TotalElapsed); err != nil { + return fmt.Errorf("%s | %w", "TotalElapsed", err) + } + + case "total_elapsed_nanos": + if err := dec.Decode(&s.TotalElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "TotalElapsedNanos", err) + } + + case "total_size": + if err := dec.Decode(&s.TotalSize); err != nil { + return fmt.Errorf("%s | %w", "TotalSize", err) + } + + case "total_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeBytes", err) + } + s.TotalSizeBytes = value + case float64: + f := int64(v) + s.TotalSizeBytes = f + } + + case "total_throttled": + if err := dec.Decode(&s.TotalThrottled); err != nil { + return fmt.Errorf("%s | %w", "TotalThrottled", err) + } + + case "total_throttled_nanos": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalThrottledNanos", err) + } + s.TotalThrottledNanos = value + case float64: + f := int64(v) + s.TotalThrottledNanos = f + } + + } + } + return nil +} + +// NewWriteSummaryInfo returns a WriteSummaryInfo. 
+func NewWriteSummaryInfo() *WriteSummaryInfo { + r := &WriteSummaryInfo{} + + return r +} + +// false diff --git a/typedapi/types/xpackdatafeed.go b/typedapi/types/xpackdatafeed.go index e7544058db..8a87eeb749 100644 --- a/typedapi/types/xpackdatafeed.go +++ b/typedapi/types/xpackdatafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // XpackDatafeed type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L77-L79 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L74-L76 type XpackDatafeed struct { Count int64 `json:"count"` } @@ -77,3 +77,5 @@ func NewXpackDatafeed() *XpackDatafeed { return r } + +// false diff --git a/typedapi/types/xpackfeature.go b/typedapi/types/xpackfeature.go index 4897d37353..ca9f3638c3 100644 --- a/typedapi/types/xpackfeature.go +++ b/typedapi/types/xpackfeature.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // XpackFeature type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/info/types.ts#L77-L82 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/info/types.ts#L85-L90 type XpackFeature struct { Available bool `json:"available"` Description *string `json:"description,omitempty"` @@ -110,3 +110,5 @@ func NewXpackFeature() *XpackFeature { return r } + +// false diff --git a/typedapi/types/xpackfeatures.go b/typedapi/types/xpackfeatures.go index fc05b61005..f320a949a8 100644 --- a/typedapi/types/xpackfeatures.go +++ b/typedapi/types/xpackfeatures.go @@ -16,28 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types // XpackFeatures type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/info/types.ts#L42-L75 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/info/types.ts#L42-L83 type XpackFeatures struct { AggregateMetric XpackFeature `json:"aggregate_metric"` Analytics XpackFeature `json:"analytics"` Archive XpackFeature `json:"archive"` Ccr XpackFeature `json:"ccr"` - DataFrame *XpackFeature `json:"data_frame,omitempty"` - DataScience *XpackFeature `json:"data_science,omitempty"` DataStreams XpackFeature `json:"data_streams"` DataTiers XpackFeature `json:"data_tiers"` Enrich XpackFeature `json:"enrich"` + EnterpriseSearch XpackFeature `json:"enterprise_search"` Eql XpackFeature `json:"eql"` - Flattened *XpackFeature `json:"flattened,omitempty"` + Esql XpackFeature `json:"esql"` FrozenIndices XpackFeature `json:"frozen_indices"` Graph XpackFeature `json:"graph"` Ilm XpackFeature `json:"ilm"` + Logsdb XpackFeature `json:"logsdb"` Logstash XpackFeature `json:"logstash"` Ml XpackFeature `json:"ml"` Monitoring XpackFeature `json:"monitoring"` @@ -49,7 +49,7 @@ type XpackFeatures struct { Spatial XpackFeature `json:"spatial"` Sql XpackFeature `json:"sql"` Transform XpackFeature `json:"transform"` - Vectors *XpackFeature `json:"vectors,omitempty"` + UniversalProfiling XpackFeature `json:"universal_profiling"` VotingOnly XpackFeature `json:"voting_only"` Watcher XpackFeature `json:"watcher"` } @@ -60,3 +60,5 @@ func NewXpackFeatures() *XpackFeatures { return r } + +// false diff --git a/typedapi/types/xpackquery.go b/typedapi/types/xpackquery.go index 7a51298683..a7509b3b14 100644 --- a/typedapi/types/xpackquery.go +++ b/typedapi/types/xpackquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // XpackQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L259-L264 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L269-L274 type XpackQuery struct { Count *int `json:"count,omitempty"` Failed *int `json:"failed,omitempty"` @@ -129,3 +129,5 @@ func NewXpackQuery() *XpackQuery { return r } + +// false diff --git a/typedapi/types/xpackrealm.go b/typedapi/types/xpackrealm.go index eda6b41aa9..0901cb62da 100644 --- a/typedapi/types/xpackrealm.go +++ b/typedapi/types/xpackrealm.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // XpackRealm type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L417-L426 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L427-L436 type XpackRealm struct { Available bool `json:"available"` Cache []RealmCache `json:"cache,omitempty"` @@ -139,3 +139,5 @@ func NewXpackRealm() *XpackRealm { return r } + +// false diff --git a/typedapi/types/xpackrolemapping.go b/typedapi/types/xpackrolemapping.go index e60186f9d4..fc8301c414 100644 --- a/typedapi/types/xpackrolemapping.go +++ b/typedapi/types/xpackrolemapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // XpackRoleMapping type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L270-L273 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L280-L283 type XpackRoleMapping struct { Enabled int `json:"enabled"` Size int `json:"size"` @@ -95,3 +95,5 @@ func NewXpackRoleMapping() *XpackRoleMapping { return r } + +// false diff --git a/typedapi/types/xpackruntimefieldtypes.go b/typedapi/types/xpackruntimefieldtypes.go index 6048ed28a6..7c07b79701 100644 --- a/typedapi/types/xpackruntimefieldtypes.go +++ b/typedapi/types/xpackruntimefieldtypes.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // XpackRuntimeFieldTypes type. // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/types.ts#L275-L277 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/types.ts#L285-L287 type XpackRuntimeFieldTypes struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -97,3 +97,5 @@ func NewXpackRuntimeFieldTypes() *XpackRuntimeFieldTypes { return r } + +// false diff --git a/typedapi/types/zeroshotclassificationinferenceoptions.go b/typedapi/types/zeroshotclassificationinferenceoptions.go index d8208152f4..f3cd89398a 100644 --- a/typedapi/types/zeroshotclassificationinferenceoptions.go +++ b/typedapi/types/zeroshotclassificationinferenceoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ZeroShotClassificationInferenceOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L201-L222 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L185-L206 type ZeroShotClassificationInferenceOptions struct { // ClassificationLabels The zero shot classification labels indicating entailment, neutral, and // contradiction @@ -129,3 +129,13 @@ func NewZeroShotClassificationInferenceOptions() *ZeroShotClassificationInferenc return r } + +// true + +type ZeroShotClassificationInferenceOptionsVariant interface { + ZeroShotClassificationInferenceOptionsCaster() *ZeroShotClassificationInferenceOptions +} + +func (s *ZeroShotClassificationInferenceOptions) ZeroShotClassificationInferenceOptionsCaster() *ZeroShotClassificationInferenceOptions { + return s +} diff --git a/typedapi/types/zeroshotclassificationinferenceupdateoptions.go b/typedapi/types/zeroshotclassificationinferenceupdateoptions.go index e460258850..63f74e57b4 100644 --- a/typedapi/types/zeroshotclassificationinferenceupdateoptions.go +++ b/typedapi/types/zeroshotclassificationinferenceupdateoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package types @@ -31,7 +31,7 @@ import ( // ZeroShotClassificationInferenceUpdateOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/ml/_types/inference.ts#L374-L383 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/ml/_types/inference.ts#L362-L371 type ZeroShotClassificationInferenceUpdateOptions struct { // Labels The labels to predict. Labels []string `json:"labels"` @@ -107,3 +107,13 @@ func NewZeroShotClassificationInferenceUpdateOptions() *ZeroShotClassificationIn return r } + +// true + +type ZeroShotClassificationInferenceUpdateOptionsVariant interface { + ZeroShotClassificationInferenceUpdateOptionsCaster() *ZeroShotClassificationInferenceUpdateOptions +} + +func (s *ZeroShotClassificationInferenceUpdateOptions) ZeroShotClassificationInferenceUpdateOptionsCaster() *ZeroShotClassificationInferenceUpdateOptions { + return s +} diff --git a/typedapi/watcher/ackwatch/ack_watch.go b/typedapi/watcher/ackwatch/ack_watch.go index fbf0ccdbe4..fd59bbbfd4 100644 --- a/typedapi/watcher/ackwatch/ack_watch.go +++ b/typedapi/watcher/ackwatch/ack_watch.go @@ -16,10 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Acknowledges a watch, manually throttling the execution of the watch's -// actions. +// Acknowledge a watch. +// Acknowledging a watch enables you to manually throttle the execution of the +// watch's actions. +// +// The acknowledgement state of an action is stored in the +// `status.actions..ack.state` structure. +// +// IMPORTANT: If the specified watch is currently being executed, this API will +// return an error +// The reason for this behavior is to prevent overwriting the watch status from +// a watch execution. 
+// +// Acknowledging an action throttles further executions of that action until its +// `ack.state` is reset to `awaits_successful_execution`. +// This happens when the condition of the watch is not met (the condition +// evaluates to false). package ackwatch import ( @@ -80,8 +94,22 @@ func NewAckWatchFunc(tp elastictransport.Interface) NewAckWatch { } } -// Acknowledges a watch, manually throttling the execution of the watch's -// actions. +// Acknowledge a watch. +// Acknowledging a watch enables you to manually throttle the execution of the +// watch's actions. +// +// The acknowledgement state of an action is stored in the +// `status.actions..ack.state` structure. +// +// IMPORTANT: If the specified watch is currently being executed, this API will +// return an error +// The reason for this behavior is to prevent overwriting the watch status from +// a watch execution. +// +// Acknowledging an action throttles further executions of that action until its +// `ack.state` is reset to `awaits_successful_execution`. +// This happens when the condition of the watch is not met (the condition +// evaluates to false). // // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html func New(tp elastictransport.Interface) *AckWatch { @@ -318,7 +346,7 @@ func (r *AckWatch) Header(key, value string) *AckWatch { return r } -// WatchId Watch ID +// WatchId The watch identifier. // API Name: watchid func (r *AckWatch) _watchid(watchid string) *AckWatch { r.paramSet |= watchidMask @@ -327,7 +355,8 @@ func (r *AckWatch) _watchid(watchid string) *AckWatch { return r } -// ActionId A comma-separated list of the action ids to be acked +// ActionId A comma-separated list of the action identifiers to acknowledge. +// If you omit this parameter, all of the actions of the watch are acknowledged. 
// API Name: actionid func (r *AckWatch) ActionId(actionid string) *AckWatch { r.paramSet |= actionidMask diff --git a/typedapi/watcher/ackwatch/response.go b/typedapi/watcher/ackwatch/response.go index 61cbdd8d6a..a059c02f6d 100644 --- a/typedapi/watcher/ackwatch/response.go +++ b/typedapi/watcher/ackwatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package ackwatch @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package ackwatch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/ack_watch/WatcherAckWatchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/ack_watch/WatcherAckWatchResponse.ts#L22-L24 type Response struct { Status types.WatchStatus `json:"status"` } diff --git a/typedapi/watcher/activatewatch/activate_watch.go b/typedapi/watcher/activatewatch/activate_watch.go index 431755312e..eae022c7f8 100644 --- a/typedapi/watcher/activatewatch/activate_watch.go +++ b/typedapi/watcher/activatewatch/activate_watch.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Activates a currently inactive watch. +// Activate a watch. +// A watch can be either active or inactive. 
package activatewatch import ( @@ -76,7 +77,8 @@ func NewActivateWatchFunc(tp elastictransport.Interface) NewActivateWatch { } } -// Activates a currently inactive watch. +// Activate a watch. +// A watch can be either active or inactive. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-activate-watch.html func New(tp elastictransport.Interface) *ActivateWatch { @@ -292,7 +294,7 @@ func (r *ActivateWatch) Header(key, value string) *ActivateWatch { return r } -// WatchId Watch ID +// WatchId The watch identifier. // API Name: watchid func (r *ActivateWatch) _watchid(watchid string) *ActivateWatch { r.paramSet |= watchidMask diff --git a/typedapi/watcher/activatewatch/response.go b/typedapi/watcher/activatewatch/response.go index 8d7fd2bcb3..a953178d94 100644 --- a/typedapi/watcher/activatewatch/response.go +++ b/typedapi/watcher/activatewatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package activatewatch @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package activatewatch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/activate_watch/WatcherActivateWatchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/activate_watch/WatcherActivateWatchResponse.ts#L22-L24 type Response struct { Status types.ActivationStatus `json:"status"` } diff --git a/typedapi/watcher/deactivatewatch/deactivate_watch.go b/typedapi/watcher/deactivatewatch/deactivate_watch.go index 7ceb5271a6..f9e4afd6b9 100644 --- a/typedapi/watcher/deactivatewatch/deactivate_watch.go +++ b/typedapi/watcher/deactivatewatch/deactivate_watch.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Deactivates a currently active watch. +// Deactivate a watch. +// A watch can be either active or inactive. package deactivatewatch import ( @@ -76,7 +77,8 @@ func NewDeactivateWatchFunc(tp elastictransport.Interface) NewDeactivateWatch { } } -// Deactivates a currently active watch. +// Deactivate a watch. +// A watch can be either active or inactive. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deactivate-watch.html func New(tp elastictransport.Interface) *DeactivateWatch { @@ -292,7 +294,7 @@ func (r *DeactivateWatch) Header(key, value string) *DeactivateWatch { return r } -// WatchId Watch ID +// WatchId The watch identifier. // API Name: watchid func (r *DeactivateWatch) _watchid(watchid string) *DeactivateWatch { r.paramSet |= watchidMask diff --git a/typedapi/watcher/deactivatewatch/response.go b/typedapi/watcher/deactivatewatch/response.go index 6933fdae13..4a18de5c7b 100644 --- a/typedapi/watcher/deactivatewatch/response.go +++ b/typedapi/watcher/deactivatewatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deactivatewatch @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package deactivatewatch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/deactivate_watch/DeactivateWatchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/deactivate_watch/DeactivateWatchResponse.ts#L22-L24 type Response struct { Status types.ActivationStatus `json:"status"` } diff --git a/typedapi/watcher/deletewatch/delete_watch.go b/typedapi/watcher/deletewatch/delete_watch.go index 1b013d3332..4c170faa17 100644 --- a/typedapi/watcher/deletewatch/delete_watch.go +++ b/typedapi/watcher/deletewatch/delete_watch.go @@ -16,9 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Removes a watch from Watcher. +// Delete a watch. +// When the watch is removed, the document representing the watch in the +// `.watches` index is gone and it will never be run again. +// +// Deleting a watch does not delete any watch execution records related to this +// watch from the watch history. +// +// IMPORTANT: Deleting a watch must be done by using only this API. +// Do not delete the watch directly from the `.watches` index using the +// Elasticsearch delete document API +// When Elasticsearch security features are enabled, make sure no write +// privileges are granted to anyone for the `.watches` index. package deletewatch import ( @@ -76,7 +87,18 @@ func NewDeleteWatchFunc(tp elastictransport.Interface) NewDeleteWatch { } } -// Removes a watch from Watcher. +// Delete a watch. +// When the watch is removed, the document representing the watch in the +// `.watches` index is gone and it will never be run again. +// +// Deleting a watch does not delete any watch execution records related to this +// watch from the watch history. +// +// IMPORTANT: Deleting a watch must be done by using only this API. +// Do not delete the watch directly from the `.watches` index using the +// Elasticsearch delete document API +// When Elasticsearch security features are enabled, make sure no write +// privileges are granted to anyone for the `.watches` index. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html func New(tp elastictransport.Interface) *DeleteWatch { @@ -290,7 +312,7 @@ func (r *DeleteWatch) Header(key, value string) *DeleteWatch { return r } -// Id Watch ID +// Id The watch identifier. 
// API Name: id func (r *DeleteWatch) _id(id string) *DeleteWatch { r.paramSet |= idMask diff --git a/typedapi/watcher/deletewatch/response.go b/typedapi/watcher/deletewatch/response.go index 4fc55b5e38..9b8e09cbbe 100644 --- a/typedapi/watcher/deletewatch/response.go +++ b/typedapi/watcher/deletewatch/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package deletewatch // Response holds the response body struct for the package deletewatch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/delete_watch/DeleteWatchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/delete_watch/DeleteWatchResponse.ts#L22-L24 type Response struct { Found bool `json:"found"` Id_ string `json:"_id"` diff --git a/typedapi/watcher/executewatch/execute_watch.go b/typedapi/watcher/executewatch/execute_watch.go index da05949df3..8cbab40d58 100644 --- a/typedapi/watcher/executewatch/execute_watch.go +++ b/typedapi/watcher/executewatch/execute_watch.go @@ -16,15 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d +// Run a watch. // This API can be used to force execution of the watch outside of its // triggering logic or to simulate the watch execution for debugging purposes. +// // For testing and debugging purposes, you also have fine-grained control on how -// the watch runs. 
You can execute the watch without executing all of its -// actions or alternatively by simulating them. You can also force execution by -// ignoring the watch condition and control whether a watch record would be -// written to the watch history after execution. +// the watch runs. +// You can run the watch without running all of its actions or alternatively by +// simulating them. +// You can also force execution by ignoring the watch condition and control +// whether a watch record would be written to the watch history after it runs. +// +// You can use the run watch API to run watches that are not yet registered by +// specifying the watch definition inline. +// This serves as great tool for testing and debugging your watches prior to +// adding them to Watcher. +// +// When Elasticsearch security features are enabled on your cluster, watches are +// run with the privileges of the user that stored the watches. +// If your user is allowed to read index `a`, but not index `b`, then the exact +// same set of rules will apply during execution of a watch. +// +// When using the run watch API, the authorization data of the user that called +// the API will be used as a base, instead of the information who stored the +// watch. package executewatch import ( @@ -86,13 +103,30 @@ func NewExecuteWatchFunc(tp elastictransport.Interface) NewExecuteWatch { } } +// Run a watch. // This API can be used to force execution of the watch outside of its // triggering logic or to simulate the watch execution for debugging purposes. +// // For testing and debugging purposes, you also have fine-grained control on how -// the watch runs. You can execute the watch without executing all of its -// actions or alternatively by simulating them. You can also force execution by -// ignoring the watch condition and control whether a watch record would be -// written to the watch history after execution. +// the watch runs. 
+// You can run the watch without running all of its actions or alternatively by +// simulating them. +// You can also force execution by ignoring the watch condition and control +// whether a watch record would be written to the watch history after it runs. +// +// You can use the run watch API to run watches that are not yet registered by +// specifying the watch definition inline. +// This serves as great tool for testing and debugging your watches prior to +// adding them to Watcher. +// +// When Elasticsearch security features are enabled on your cluster, watches are +// run with the privileges of the user that stored the watches. +// If your user is allowed to read index `a`, but not index `b`, then the exact +// same set of rules will apply during execution of a watch. +// +// When using the run watch API, the authorization data of the user that called +// the API will be used as a base, instead of the information who stored the +// watch. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html func New(tp elastictransport.Interface) *ExecuteWatch { @@ -102,8 +136,6 @@ func New(tp elastictransport.Interface) *ExecuteWatch { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -328,7 +360,7 @@ func (r *ExecuteWatch) Header(key, value string) *ExecuteWatch { return r } -// Id Identifier for the watch. +// Id The watch identifier. // API Name: id func (r *ExecuteWatch) Id(id string) *ExecuteWatch { r.paramSet |= idMask @@ -389,69 +421,135 @@ func (r *ExecuteWatch) Pretty(pretty bool) *ExecuteWatch { return r } -// ActionModes Determines how to handle the watch actions as part of the watch execution. +// Determines how to handle the watch actions as part of the watch execution. 
// API name: action_modes func (r *ExecuteWatch) ActionModes(actionmodes map[string]actionexecutionmode.ActionExecutionMode) *ExecuteWatch { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.ActionModes = actionmodes + return r +} + +func (r *ExecuteWatch) AddActionMode(key string, value actionexecutionmode.ActionExecutionMode) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]actionexecutionmode.ActionExecutionMode + if r.req.ActionModes == nil { + r.req.ActionModes = make(map[string]actionexecutionmode.ActionExecutionMode) + } else { + tmp = r.req.ActionModes + } + tmp[key] = value + + r.req.ActionModes = tmp return r } -// AlternativeInput When present, the watch uses this object as a payload instead of executing +// When present, the watch uses this object as a payload instead of executing // its own input. // API name: alternative_input func (r *ExecuteWatch) AlternativeInput(alternativeinput map[string]json.RawMessage) *ExecuteWatch { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.AlternativeInput = alternativeinput + return r +} + +func (r *ExecuteWatch) AddAlternativeInput(key string, value json.RawMessage) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.AlternativeInput == nil { + r.req.AlternativeInput = make(map[string]json.RawMessage) + } else { + tmp = r.req.AlternativeInput + } + + tmp[key] = value + r.req.AlternativeInput = tmp return r } -// IgnoreCondition When set to `true`, the watch execution uses the always condition. This can +// When set to `true`, the watch execution uses the always condition. This can // also be specified as an HTTP parameter. 
// API name: ignore_condition func (r *ExecuteWatch) IgnoreCondition(ignorecondition bool) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IgnoreCondition = &ignorecondition return r } -// RecordExecution When set to `true`, the watch record representing the watch execution result -// is persisted to the `.watcher-history` index for the current time. In -// addition, the status of the watch is updated, possibly throttling subsequent -// executions. This can also be specified as an HTTP parameter. +// When set to `true`, the watch record representing the watch execution result +// is persisted to the `.watcher-history` index for the current time. +// In addition, the status of the watch is updated, possibly throttling +// subsequent runs. +// This can also be specified as an HTTP parameter. // API name: record_execution func (r *ExecuteWatch) RecordExecution(recordexecution bool) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RecordExecution = &recordexecution return r } // API name: simulated_actions -func (r *ExecuteWatch) SimulatedActions(simulatedactions *types.SimulatedActions) *ExecuteWatch { +func (r *ExecuteWatch) SimulatedActions(simulatedactions types.SimulatedActionsVariant) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.SimulatedActions = simulatedactions + r.req.SimulatedActions = simulatedactions.SimulatedActionsCaster() return r } -// TriggerData This structure is parsed as the data of the trigger event that will be used -// during the watch execution +// This structure is parsed as the data of the trigger event that will be used +// during the watch execution. 
// API name: trigger_data -func (r *ExecuteWatch) TriggerData(triggerdata *types.ScheduleTriggerEvent) *ExecuteWatch { +func (r *ExecuteWatch) TriggerData(triggerdata types.ScheduleTriggerEventVariant) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.TriggerData = triggerdata + r.req.TriggerData = triggerdata.ScheduleTriggerEventCaster() return r } -// Watch When present, this watch is used instead of the one specified in the request. -// This watch is not persisted to the index and record_execution cannot be set. +// When present, this watch is used instead of the one specified in the request. +// This watch is not persisted to the index and `record_execution` cannot be +// set. // API name: watch -func (r *ExecuteWatch) Watch(watch *types.Watch) *ExecuteWatch { +func (r *ExecuteWatch) Watch(watch types.WatchVariant) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Watch = watch + r.req.Watch = watch.WatchCaster() return r } diff --git a/typedapi/watcher/executewatch/request.go b/typedapi/watcher/executewatch/request.go index 6ebf691457..0d7e40ffb7 100644 --- a/typedapi/watcher/executewatch/request.go +++ b/typedapi/watcher/executewatch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package executewatch @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package executewatch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/execute_watch/WatcherExecuteWatchRequest.ts#L28-L79 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/execute_watch/WatcherExecuteWatchRequest.ts#L28-L105 type Request struct { // ActionModes Determines how to handle the watch actions as part of the watch execution. @@ -42,16 +42,18 @@ type Request struct { // also be specified as an HTTP parameter. IgnoreCondition *bool `json:"ignore_condition,omitempty"` // RecordExecution When set to `true`, the watch record representing the watch execution result - // is persisted to the `.watcher-history` index for the current time. In - // addition, the status of the watch is updated, possibly throttling subsequent - // executions. This can also be specified as an HTTP parameter. + // is persisted to the `.watcher-history` index for the current time. + // In addition, the status of the watch is updated, possibly throttling + // subsequent runs. + // This can also be specified as an HTTP parameter. RecordExecution *bool `json:"record_execution,omitempty"` SimulatedActions *types.SimulatedActions `json:"simulated_actions,omitempty"` // TriggerData This structure is parsed as the data of the trigger event that will be used - // during the watch execution + // during the watch execution. TriggerData *types.ScheduleTriggerEvent `json:"trigger_data,omitempty"` // Watch When present, this watch is used instead of the one specified in the request. 
- // This watch is not persisted to the index and record_execution cannot be set. + // This watch is not persisted to the index and `record_execution` cannot be + // set. Watch *types.Watch `json:"watch,omitempty"` } diff --git a/typedapi/watcher/executewatch/response.go b/typedapi/watcher/executewatch/response.go index b45a408113..eb1bd61480 100644 --- a/typedapi/watcher/executewatch/response.go +++ b/typedapi/watcher/executewatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package executewatch @@ -26,9 +26,14 @@ import ( // Response holds the response body struct for the package executewatch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/execute_watch/WatcherExecuteWatchResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/execute_watch/WatcherExecuteWatchResponse.ts#L23-L34 type Response struct { - Id_ string `json:"_id"` + + // Id_ The watch record identifier as it would be stored in the `.watcher-history` + // index. + Id_ string `json:"_id"` + // WatchRecord The watch record document as it would be stored in the `.watcher-history` + // index. WatchRecord types.WatchRecord `json:"watch_record"` } diff --git a/typedapi/watcher/getsettings/get_settings.go b/typedapi/watcher/getsettings/get_settings.go index 90f4b6328e..de0e253494 100644 --- a/typedapi/watcher/getsettings/get_settings.go +++ b/typedapi/watcher/getsettings/get_settings.go @@ -16,21 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieve settings for the watcher system index +// Get Watcher index settings. +// Get settings for the Watcher internal index (`.watches`). +// Only a subset of settings are shown, for example `index.auto_expand_replicas` +// and `index.number_of_replicas`. package getsettings import ( "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -65,7 +71,10 @@ func NewGetSettingsFunc(tp elastictransport.Interface) NewGetSettings { } } -// Retrieve settings for the watcher system index +// Get Watcher index settings. +// Get settings for the Watcher internal index (`.watches`). +// Only a subset of settings are shown, for example `index.auto_expand_replicas` +// and `index.number_of_replicas`. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-settings.html func New(tp elastictransport.Interface) *GetSettings { @@ -180,8 +189,57 @@ func (r GetSettings) Perform(providedCtx context.Context) (*http.Response, error } // Do runs the request through the transport, handle the response and returns a getsettings.Response -func (r GetSettings) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) +func (r GetSettings) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. 
@@ -229,3 +287,57 @@ func (r *GetSettings) Header(key, value string) *GetSettings { return r } + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetSettings) MasterTimeout(duration string) *GetSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetSettings) ErrorTrace(errortrace bool) *GetSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetSettings) FilterPath(filterpaths ...string) *GetSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"eixsts_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetSettings) Human(human bool) *GetSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetSettings) Pretty(pretty bool) *GetSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/typedapi/watcher/getsettings/response.go b/typedapi/watcher/getsettings/response.go new file mode 100644 index 0000000000..b51dc96ea4 --- /dev/null +++ b/typedapi/watcher/getsettings/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package getsettings + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package getsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/get_settings/WatcherGetSettingsResponse.ts#L22-L26 +type Response struct { + Index types.IndexSettings `json:"index"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/watcher/getwatch/get_watch.go b/typedapi/watcher/getwatch/get_watch.go index e7f3b2d10f..539880dcaf 100644 --- a/typedapi/watcher/getwatch/get_watch.go +++ b/typedapi/watcher/getwatch/get_watch.go @@ -16,9 +16,9 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves a watch by its ID. +// Get a watch. package getwatch import ( @@ -76,7 +76,7 @@ func NewGetWatchFunc(tp elastictransport.Interface) NewGetWatch { } } -// Retrieves a watch by its ID. +// Get a watch. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-watch.html func New(tp elastictransport.Interface) *GetWatch { @@ -290,7 +290,7 @@ func (r *GetWatch) Header(key, value string) *GetWatch { return r } -// Id Watch ID +// Id The watch identifier. // API Name: id func (r *GetWatch) _id(id string) *GetWatch { r.paramSet |= idMask diff --git a/typedapi/watcher/getwatch/response.go b/typedapi/watcher/getwatch/response.go index 8462a9768c..4782be7923 100644 --- a/typedapi/watcher/getwatch/response.go +++ b/typedapi/watcher/getwatch/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package getwatch @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getwatch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/get_watch/GetWatchResponse.ts#L24-L34 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/get_watch/GetWatchResponse.ts#L24-L34 type Response struct { Found bool `json:"found"` Id_ string `json:"_id"` diff --git a/typedapi/watcher/putwatch/put_watch.go b/typedapi/watcher/putwatch/put_watch.go index 8a358d15fa..a6dd9bee6b 100644 --- a/typedapi/watcher/putwatch/put_watch.go +++ b/typedapi/watcher/putwatch/put_watch.go @@ -16,9 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Creates a new watch, or updates an existing one. +// Create or update a watch. +// When a watch is registered, a new document that represents the watch is added +// to the `.watches` index and its trigger is immediately registered with the +// relevant trigger engine. +// Typically for the `schedule` trigger, the scheduler is the trigger engine. +// +// IMPORTANT: You must use Kibana or this API to create a watch. +// Do not add a watch directly to the `.watches` index by using the +// Elasticsearch index API. +// If Elasticsearch security features are enabled, do not give users write +// privileges on the `.watches` index. 
+// +// When you add a watch you can also define its initial active state by setting +// the *active* parameter. +// +// When Elasticsearch security features are enabled, your watch can index or +// search only on indices for which the user that stored the watch has +// privileges. +// If the user is able to read index `a`, but not index `b`, the same will apply +// when the watch runs. package putwatch import ( @@ -81,7 +100,26 @@ func NewPutWatchFunc(tp elastictransport.Interface) NewPutWatch { } } -// Creates a new watch, or updates an existing one. +// Create or update a watch. +// When a watch is registered, a new document that represents the watch is added +// to the `.watches` index and its trigger is immediately registered with the +// relevant trigger engine. +// Typically for the `schedule` trigger, the scheduler is the trigger engine. +// +// IMPORTANT: You must use Kibana or this API to create a watch. +// Do not add a watch directly to the `.watches` index by using the +// Elasticsearch index API. +// If Elasticsearch security features are enabled, do not give users write +// privileges on the `.watches` index. +// +// When you add a watch you can also define its initial active state by setting +// the *active* parameter. +// +// When Elasticsearch security features are enabled, your watch can index or +// search only on indices for which the user that stored the watch has +// privileges. +// If the user is able to read index `a`, but not index `b`, the same will apply +// when the watch runs. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html func New(tp elastictransport.Interface) *PutWatch { @@ -91,8 +129,6 @@ func New(tp elastictransport.Interface) *PutWatch { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -306,7 +342,7 @@ func (r *PutWatch) Header(key, value string) *PutWatch { return r } -// Id Watch ID +// Id The identifier for the watch. // API Name: id func (r *PutWatch) _id(id string) *PutWatch { r.paramSet |= idMask @@ -315,7 +351,8 @@ func (r *PutWatch) _id(id string) *PutWatch { return r } -// Active Specify whether the watch is in/active by default +// Active The initial state of the watch. +// The default value is `true`, which means the watch is active by default. // API name: active func (r *PutWatch) Active(active bool) *PutWatch { r.values.Set("active", strconv.FormatBool(active)) @@ -393,57 +430,131 @@ func (r *PutWatch) Pretty(pretty bool) *PutWatch { return r } +// The list of actions that will be run if the condition matches. // API name: actions func (r *PutWatch) Actions(actions map[string]types.WatcherAction) *PutWatch { - + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } r.req.Actions = actions + return r +} + +func (r *PutWatch) AddAction(key string, value types.WatcherActionVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.WatcherAction + if r.req.Actions == nil { + r.req.Actions = make(map[string]types.WatcherAction) + } else { + tmp = r.req.Actions + } + + tmp[key] = *value.WatcherActionCaster() + r.req.Actions = tmp return r } +// The condition that defines if the actions should be run. 
// API name: condition -func (r *PutWatch) Condition(condition *types.WatcherCondition) *PutWatch { +func (r *PutWatch) Condition(condition types.WatcherConditionVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Condition = condition + r.req.Condition = condition.WatcherConditionCaster() return r } +// The input that defines the input that loads the data for the watch. // API name: input -func (r *PutWatch) Input(input *types.WatcherInput) *PutWatch { +func (r *PutWatch) Input(input types.WatcherInputVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Input = input + r.req.Input = input.WatcherInputCaster() return r } +// Metadata JSON that will be copied into the history entries. // API name: metadata -func (r *PutWatch) Metadata(metadata types.Metadata) *PutWatch { - r.req.Metadata = metadata +func (r *PutWatch) Metadata(metadata types.MetadataVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() return r } +// The minimum time between actions being run. +// The default is 5 seconds. +// This default can be changed in the config file with the setting +// `xpack.watcher.throttle.period.default_period`. +// If both this value and the `throttle_period_in_millis` parameter are +// specified, Watcher uses the last parameter included in the request. 
// API name: throttle_period -func (r *PutWatch) ThrottlePeriod(throttleperiod string) *PutWatch { +func (r *PutWatch) ThrottlePeriod(duration types.DurationVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.ThrottlePeriod = &throttleperiod + r.req.ThrottlePeriod = *duration.DurationCaster() return r } +// Minimum time in milliseconds between actions being run. Defaults to 5000. If +// both this value and the throttle_period parameter are specified, Watcher uses +// the last parameter included in the request. +// API name: throttle_period_in_millis +func (r *PutWatch) ThrottlePeriodInMillis(durationvalueunitmillis int64) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ThrottlePeriodInMillis = &durationvalueunitmillis + + return r +} + +// The transform that processes the watch payload to prepare it for the watch +// actions. // API name: transform -func (r *PutWatch) Transform(transform *types.TransformContainer) *PutWatch { +func (r *PutWatch) Transform(transform types.TransformContainerVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Transform = transform + r.req.Transform = transform.TransformContainerCaster() return r } +// The trigger that defines when the watch should run. 
// API name: trigger -func (r *PutWatch) Trigger(trigger *types.TriggerContainer) *PutWatch { +func (r *PutWatch) Trigger(trigger types.TriggerContainerVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Trigger = trigger + r.req.Trigger = trigger.TriggerContainerCaster() return r } diff --git a/typedapi/watcher/putwatch/request.go b/typedapi/watcher/putwatch/request.go index c31ad916e4..015f90cd59 100644 --- a/typedapi/watcher/putwatch/request.go +++ b/typedapi/watcher/putwatch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putwatch @@ -26,22 +26,39 @@ import ( "errors" "fmt" "io" - "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Request holds the request body struct for the package putwatch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/put_watch/WatcherPutWatchRequest.ts#L30-L53 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/put_watch/WatcherPutWatchRequest.ts#L31-L110 type Request struct { - Actions map[string]types.WatcherAction `json:"actions,omitempty"` - Condition *types.WatcherCondition `json:"condition,omitempty"` - Input *types.WatcherInput `json:"input,omitempty"` - Metadata types.Metadata `json:"metadata,omitempty"` - ThrottlePeriod *string `json:"throttle_period,omitempty"` - Transform *types.TransformContainer `json:"transform,omitempty"` - Trigger *types.TriggerContainer `json:"trigger,omitempty"` + + // Actions The list of actions that will be run if the condition matches. 
+ Actions map[string]types.WatcherAction `json:"actions,omitempty"` + // Condition The condition that defines if the actions should be run. + Condition *types.WatcherCondition `json:"condition,omitempty"` + // Input The input that defines the input that loads the data for the watch. + Input *types.WatcherInput `json:"input,omitempty"` + // Metadata Metadata JSON that will be copied into the history entries. + Metadata types.Metadata `json:"metadata,omitempty"` + // ThrottlePeriod The minimum time between actions being run. + // The default is 5 seconds. + // This default can be changed in the config file with the setting + // `xpack.watcher.throttle.period.default_period`. + // If both this value and the `throttle_period_in_millis` parameter are + // specified, Watcher uses the last parameter included in the request. + ThrottlePeriod types.Duration `json:"throttle_period,omitempty"` + // ThrottlePeriodInMillis Minimum time in milliseconds between actions being run. Defaults to 5000. If + // both this value and the throttle_period parameter are specified, Watcher uses + // the last parameter included in the request. + ThrottlePeriodInMillis *int64 `json:"throttle_period_in_millis,omitempty"` + // Transform The transform that processes the watch payload to prepare it for the watch + // actions. + Transform *types.TransformContainer `json:"transform,omitempty"` + // Trigger The trigger that defines when the watch should run. 
+ Trigger *types.TriggerContainer `json:"trigger,omitempty"` } // NewRequest returns a Request @@ -103,16 +120,14 @@ func (s *Request) UnmarshalJSON(data []byte) error { } case "throttle_period": - var tmp json.RawMessage - if err := dec.Decode(&tmp); err != nil { + if err := dec.Decode(&s.ThrottlePeriod); err != nil { return fmt.Errorf("%s | %w", "ThrottlePeriod", err) } - o := string(tmp[:]) - o, err = strconv.Unquote(o) - if err != nil { - o = string(tmp[:]) + + case "throttle_period_in_millis": + if err := dec.Decode(&s.ThrottlePeriodInMillis); err != nil { + return fmt.Errorf("%s | %w", "ThrottlePeriodInMillis", err) } - s.ThrottlePeriod = &o case "transform": if err := dec.Decode(&s.Transform); err != nil { diff --git a/typedapi/watcher/putwatch/response.go b/typedapi/watcher/putwatch/response.go index 31065b6ad0..f633579f12 100644 --- a/typedapi/watcher/putwatch/response.go +++ b/typedapi/watcher/putwatch/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package putwatch // Response holds the response body struct for the package putwatch // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/put_watch/WatcherPutWatchResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/put_watch/WatcherPutWatchResponse.ts#L23-L31 type Response struct { Created bool `json:"created"` Id_ string `json:"_id"` diff --git a/typedapi/watcher/querywatches/query_watches.go b/typedapi/watcher/querywatches/query_watches.go index feb47a6c2e..1f13624c7b 100644 --- a/typedapi/watcher/querywatches/query_watches.go +++ b/typedapi/watcher/querywatches/query_watches.go @@ -16,9 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves stored watches. +// Query watches. +// Get all registered watches in a paginated manner and optionally filter +// watches by a query. +// +// Note that only the `_id` and `metadata.*` fields are queryable or sortable. package querywatches import ( @@ -73,7 +77,11 @@ func NewQueryWatchesFunc(tp elastictransport.Interface) NewQueryWatches { } } -// Retrieves stored watches. +// Query watches. +// Get all registered watches in a paginated manner and optionally filter +// watches by a query. +// +// Note that only the `_id` and `metadata.*` fields are queryable or sortable. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-query-watches.html func New(tp elastictransport.Interface) *QueryWatches { @@ -83,8 +91,6 @@ func New(tp elastictransport.Interface) *QueryWatches { headers: make(http.Header), buf: gobytes.NewBuffer(nil), - - req: NewRequest(), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -338,43 +344,74 @@ func (r *QueryWatches) Pretty(pretty bool) *QueryWatches { return r } -// From The offset from the first result to fetch. Needs to be non-negative. +// The offset from the first result to fetch. +// It must be non-negative. // API name: from func (r *QueryWatches) From(from int) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.From = &from return r } -// Query Optional, query filter watches to be returned. +// A query that filters the watches to be returned. // API name: query -func (r *QueryWatches) Query(query *types.Query) *QueryWatches { +func (r *QueryWatches) Query(query types.QueryVariant) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } - r.req.Query = query + r.req.Query = query.QueryCaster() return r } -// SearchAfter Optional search After to do pagination using last hit’s sort values. +// Retrieve the next page of hits using a set of sort values from the previous +// page. // API name: search_after -func (r *QueryWatches) SearchAfter(sortresults ...types.FieldValue) *QueryWatches { - r.req.SearchAfter = sortresults +func (r *QueryWatches) SearchAfter(sortresults ...types.FieldValueVariant) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } return r } -// Size The number of hits to return. Needs to be non-negative. 
+// The number of hits to return. +// It must be non-negative. // API name: size func (r *QueryWatches) Size(size int) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Size = &size return r } -// Sort Optional sort definition. +// One or more fields used to sort the search results. // API name: sort -func (r *QueryWatches) Sort(sorts ...types.SortCombinations) *QueryWatches { - r.req.Sort = sorts +func (r *QueryWatches) Sort(sorts ...types.SortCombinationsVariant) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } return r } diff --git a/typedapi/watcher/querywatches/request.go b/typedapi/watcher/querywatches/request.go index d431e75de1..74d7b43115 100644 --- a/typedapi/watcher/querywatches/request.go +++ b/typedapi/watcher/querywatches/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package querywatches @@ -33,18 +33,21 @@ import ( // Request holds the request body struct for the package querywatches // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/query_watches/WatcherQueryWatchesRequest.ts#L25-L48 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/query_watches/WatcherQueryWatchesRequest.ts#L25-L70 type Request struct { - // From The offset from the first result to fetch. Needs to be non-negative. + // From The offset from the first result to fetch. + // It must be non-negative. 
From *int `json:"from,omitempty"` - // Query Optional, query filter watches to be returned. + // Query A query that filters the watches to be returned. Query *types.Query `json:"query,omitempty"` - // SearchAfter Optional search After to do pagination using last hit’s sort values. + // SearchAfter Retrieve the next page of hits using a set of sort values from the previous + // page. SearchAfter []types.FieldValue `json:"search_after,omitempty"` - // Size The number of hits to return. Needs to be non-negative. + // Size The number of hits to return. + // It must be non-negative. Size *int `json:"size,omitempty"` - // Sort Optional sort definition. + // Sort One or more fields used to sort the search results. Sort []types.SortCombinations `json:"sort,omitempty"` } diff --git a/typedapi/watcher/querywatches/response.go b/typedapi/watcher/querywatches/response.go index 4b6272a3f5..7f5be2978a 100644 --- a/typedapi/watcher/querywatches/response.go +++ b/typedapi/watcher/querywatches/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package querywatches @@ -26,9 +26,13 @@ import ( // Response holds the response body struct for the package querywatches // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/query_watches/WatcherQueryWatchesResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/query_watches/WatcherQueryWatchesResponse.ts#L23-L34 type Response struct { - Count int `json:"count"` + + // Count The total number of watches found. 
+ Count int `json:"count"` + // Watches A list of watches based on the `from`, `size`, or `search_after` request body + // parameters. Watches []types.QueryWatch `json:"watches"` } diff --git a/typedapi/watcher/start/response.go b/typedapi/watcher/start/response.go index 78e12ce26d..210f1bce93 100644 --- a/typedapi/watcher/start/response.go +++ b/typedapi/watcher/start/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package start // Response holds the response body struct for the package start // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/start/WatcherStartResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/start/WatcherStartResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/watcher/start/start.go b/typedapi/watcher/start/start.go index 2f4a78c787..a56f77d6b2 100644 --- a/typedapi/watcher/start/start.go +++ b/typedapi/watcher/start/start.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Starts Watcher if it is not already running. +// Start the watch service. +// Start the Watcher service if it is not already running. 
package start import ( @@ -68,7 +69,8 @@ func NewStartFunc(tp elastictransport.Interface) NewStart { } } -// Starts Watcher if it is not already running. +// Start the watch service. +// Start the Watcher service if it is not already running. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-start.html func New(tp elastictransport.Interface) *Start { @@ -276,6 +278,14 @@ func (r *Start) Header(key, value string) *Start { return r } +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Start) MasterTimeout(duration string) *Start { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/watcher/stats/response.go b/typedapi/watcher/stats/response.go index 024197b280..8f17045b7d 100644 --- a/typedapi/watcher/stats/response.go +++ b/typedapi/watcher/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/stats/WatcherStatsResponse.ts#L24-L32 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/stats/WatcherStatsResponse.ts#L24-L32 type Response struct { ClusterName string `json:"cluster_name"` ManuallyStopped bool `json:"manually_stopped"` diff --git a/typedapi/watcher/stats/stats.go b/typedapi/watcher/stats/stats.go index d488387176..2cdb1259ed 100644 --- a/typedapi/watcher/stats/stats.go +++ b/typedapi/watcher/stats/stats.go @@ -16,9 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Retrieves the current Watcher metrics. +// Get Watcher statistics. +// This API always returns basic metrics. +// You retrieve more metrics by using the metric parameter. package stats import ( @@ -74,7 +76,9 @@ func NewStatsFunc(tp elastictransport.Interface) NewStats { } } -// Retrieves the current Watcher metrics. +// Get Watcher statistics. +// This API always returns basic metrics. +// You retrieve more metrics by using the metric parameter. 
// // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html func New(tp elastictransport.Interface) *Stats { diff --git a/typedapi/watcher/stop/response.go b/typedapi/watcher/stop/response.go index d06738b4f4..18d9fcc909 100644 --- a/typedapi/watcher/stop/response.go +++ b/typedapi/watcher/stop/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package stop // Response holds the response body struct for the package stop // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/watcher/stop/WatcherStopResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/stop/WatcherStopResponse.ts#L22-L24 type Response struct { // Acknowledged For a successful response, this value is always true. On failure, an diff --git a/typedapi/watcher/stop/stop.go b/typedapi/watcher/stop/stop.go index 9115d04e2e..9b0fc52943 100644 --- a/typedapi/watcher/stop/stop.go +++ b/typedapi/watcher/stop/stop.go @@ -16,9 +16,10 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Stops Watcher if it is running. +// Stop the watch service. +// Stop the Watcher service if it is running. package stop import ( @@ -68,7 +69,8 @@ func NewStopFunc(tp elastictransport.Interface) NewStop { } } -// Stops Watcher if it is running. +// Stop the watch service. 
+// Stop the Watcher service if it is running. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop.html func New(tp elastictransport.Interface) *Stop { @@ -276,6 +278,17 @@ func (r *Stop) Header(key, value string) *Stop { return r } +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *Stop) MasterTimeout(duration string) *Stop { + r.values.Set("master_timeout", duration) + + return r +} + // ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors // when they occur. // API name: error_trace diff --git a/typedapi/watcher/updatesettings/request.go b/typedapi/watcher/updatesettings/request.go new file mode 100644 index 0000000000..da61202d82 --- /dev/null +++ b/typedapi/watcher/updatesettings/request.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package updatesettings + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package updatesettings +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/update_settings/WatcherUpdateSettingsRequest.ts#L24-L58 +type Request struct { + IndexAutoExpandReplicas *string `json:"index.auto_expand_replicas,omitempty"` + IndexNumberOfReplicas *int `json:"index.number_of_replicas,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatesettings request: %w", err) + } + + return &req, nil +} diff --git a/typedapi/watcher/updatesettings/response.go b/typedapi/watcher/updatesettings/response.go new file mode 100644 index 0000000000..0da27fa49d --- /dev/null +++ b/typedapi/watcher/updatesettings/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d + +package updatesettings + +// Response holds the response body struct for the package updatesettings +// +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/watcher/update_settings/WatcherUpdateSettingsResponse.ts#L20-L24 +type Response struct { + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/typedapi/watcher/updatesettings/update_settings.go b/typedapi/watcher/updatesettings/update_settings.go index 0de08d30ef..c9a8e0d248 100644 --- a/typedapi/watcher/updatesettings/update_settings.go +++ b/typedapi/watcher/updatesettings/update_settings.go @@ -16,21 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Update settings for the watcher system index +// Update Watcher index settings. +// Update settings for the Watcher internal index (`.watches`). +// Only a subset of settings can be modified. +// This includes `index.auto_expand_replicas` and `index.number_of_replicas`. package updatesettings import ( + gobytes "bytes" "context" + "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. 
@@ -45,6 +52,10 @@ type UpdateSettings struct { raw io.Reader + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + paramSet int spanStarted bool @@ -65,7 +76,10 @@ func NewUpdateSettingsFunc(tp elastictransport.Interface) NewUpdateSettings { } } -// Update settings for the watcher system index +// Update Watcher index settings. +// Update settings for the Watcher internal index (`.watches`). +// Only a subset of settings can be modified. +// This includes `index.auto_expand_replicas` and `index.number_of_replicas`. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-update-settings.html func New(tp elastictransport.Interface) *UpdateSettings { @@ -73,6 +87,8 @@ func New(tp elastictransport.Interface) *UpdateSettings { transport: tp, values: make(url.Values), headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), } if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { @@ -84,6 +100,21 @@ func New(tp elastictransport.Interface) *UpdateSettings { return r } +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateSettings) Raw(raw io.Reader) *UpdateSettings { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateSettings) Request(req *Request) *UpdateSettings { + r.req = req + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. 
func (r *UpdateSettings) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -93,6 +124,31 @@ func (r *UpdateSettings) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateSettings: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + r.path.Scheme = "http" switch { @@ -180,13 +236,7 @@ func (r UpdateSettings) Perform(providedCtx context.Context) (*http.Response, er } // Do runs the request through the transport, handle the response and returns a updatesettings.Response -func (r UpdateSettings) Do(ctx context.Context) (bool, error) { - return r.IsSuccess(ctx) -} - -// IsSuccess allows to run a query with a context and retrieve the result as a boolean. -// This only exists for endpoints without a request payload and allows for quick control flow. 
-func (r UpdateSettings) IsSuccess(providedCtx context.Context) (bool, error) { +func (r UpdateSettings) Do(providedCtx context.Context) (*Response, error) { var ctx context.Context r.spanStarted = true if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { @@ -197,30 +247,46 @@ func (r UpdateSettings) IsSuccess(providedCtx context.Context) (bool, error) { ctx = providedCtx } - res, err := r.Perform(ctx) + response := NewResponse() + res, err := r.Perform(ctx) if err != nil { - return false, err - } - io.Copy(io.Discard, res.Body) - err = res.Body.Close() - if err != nil { - return false, err + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } - if res.StatusCode >= 200 && res.StatusCode < 300 { - return true, nil + return response, nil } - if res.StatusCode != 404 { - err := fmt.Errorf("an error happened during the UpdateSettings query execution, status code: %d", res.StatusCode) + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { instrument.RecordError(ctx, err) } - return false, err + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode } - return false, nil + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse } // Header set a key, value pair in the UpdateSettings headers map. 
@@ -229,3 +295,91 @@ func (r *UpdateSettings) Header(key, value string) *UpdateSettings {
 	return r
 }
+
+// MasterTimeout The period to wait for a connection to the master node.
+// If no response is received before the timeout expires, the request fails and
+// returns an error.
+// API name: master_timeout
+func (r *UpdateSettings) MasterTimeout(duration string) *UpdateSettings {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// Timeout The period to wait for a response.
+// If no response is received before the timeout expires, the request fails and
+// returns an error.
+// API name: timeout
+func (r *UpdateSettings) Timeout(duration string) *UpdateSettings {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *UpdateSettings) ErrorTrace(errortrace bool) *UpdateSettings {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *UpdateSettings) FilterPath(filterpaths ...string) *UpdateSettings {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *UpdateSettings) Human(human bool) *UpdateSettings {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". 
Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateSettings) Pretty(pretty bool) *UpdateSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: index.auto_expand_replicas +func (r *UpdateSettings) IndexAutoExpandReplicas(indexautoexpandreplicas string) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexAutoExpandReplicas = &indexautoexpandreplicas + + return r +} + +// API name: index.number_of_replicas +func (r *UpdateSettings) IndexNumberOfReplicas(indexnumberofreplicas int) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexNumberOfReplicas = &indexnumberofreplicas + + return r +} diff --git a/typedapi/xpack/info/info.go b/typedapi/xpack/info/info.go index bffbe6f7f5..f3c1860ffc 100644 --- a/typedapi/xpack/info/info.go +++ b/typedapi/xpack/info/info.go @@ -16,9 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// Provides general information about the installed X-Pack features. +// Get information. +// The information provided by the API includes: +// +// * Build information including the build number and timestamp. +// * License information about the currently installed license. +// * Feature information for the features that are currently enabled and +// available under the current license. 
package info import ( @@ -34,6 +40,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/xpackcategory" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -68,7 +75,13 @@ func NewInfoFunc(tp elastictransport.Interface) NewInfo { } } -// Provides general information about the installed X-Pack features. +// Get information. +// The information provided by the API includes: +// +// * Build information including the build number and timestamp. +// * License information about the currently installed license. +// * Feature information for the features that are currently enabled and +// available under the current license. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html func New(tp elastictransport.Interface) *Info { @@ -275,9 +288,10 @@ func (r *Info) Header(key, value string) *Info { } // Categories A comma-separated list of the information categories to include in the -// response. For example, `build,license,features`. +// response. +// For example, `build,license,features`. // API name: categories -func (r *Info) Categories(categories ...string) *Info { +func (r *Info) Categories(categories ...xpackcategory.XPackCategory) *Info { tmp := []string{} for _, item := range categories { tmp = append(tmp, fmt.Sprintf("%v", item)) @@ -296,7 +310,8 @@ func (r *Info) AcceptEnterprise(acceptenterprise bool) *Info { } // Human Defines whether additional human-readable information is included in the -// response. In particular, it adds descriptions and a tag line. +// response. +// In particular, it adds descriptions and a tag line. 
// API name: human func (r *Info) Human(human bool) *Info { r.values.Set("human", strconv.FormatBool(human)) diff --git a/typedapi/xpack/info/response.go b/typedapi/xpack/info/response.go index e7ea3240e0..ab639841ae 100644 --- a/typedapi/xpack/info/response.go +++ b/typedapi/xpack/info/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package info @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/info/XPackInfoResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/info/XPackInfoResponse.ts#L22-L29 type Response struct { Build types.BuildInformation `json:"build"` Features types.XpackFeatures `json:"features"` diff --git a/typedapi/xpack/usage/response.go b/typedapi/xpack/usage/response.go index 93c7247e26..023825aa93 100644 --- a/typedapi/xpack/usage/response.go +++ b/typedapi/xpack/usage/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d package usage @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package usage // -// https://github.com/elastic/elasticsearch-specification/blob/8e91c0692c0235474a0c21bb7e9716a8430e8533/specification/xpack/usage/XPackUsageResponse.ts#L43-L79 +// https://github.com/elastic/elasticsearch-specification/blob/3ea9ce260df22d3244bff5bace485dd97ff4046d/specification/xpack/usage/XPackUsageResponse.ts#L43-L79 type Response struct { AggregateMetric types.Base `json:"aggregate_metric"` Analytics types.Analytics `json:"analytics"` diff --git a/typedapi/xpack/usage/usage.go b/typedapi/xpack/usage/usage.go index 953e2a1bf6..ad4b00d33e 100644 --- a/typedapi/xpack/usage/usage.go +++ b/typedapi/xpack/usage/usage.go @@ -16,10 +16,12 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/8e91c0692c0235474a0c21bb7e9716a8430e8533 +// https://github.com/elastic/elasticsearch-specification/tree/3ea9ce260df22d3244bff5bace485dd97ff4046d -// This API provides information about which features are currently enabled and -// available under the current license and some usage statistics. +// Get usage information. +// Get information about the features that are currently enabled and available +// under the current license. +// The API also provides some usage statistics. package usage import ( @@ -69,8 +71,10 @@ func NewUsageFunc(tp elastictransport.Interface) NewUsage { } } -// This API provides information about which features are currently enabled and -// available under the current license and some usage statistics. +// Get usage information. +// Get information about the features that are currently enabled and available +// under the current license. 
+// The API also provides some usage statistics. // // https://www.elastic.co/guide/en/elasticsearch/reference/current/usage-api.html func New(tp elastictransport.Interface) *Usage { @@ -278,8 +282,10 @@ func (r *Usage) Header(key, value string) *Usage { return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, the request fails and returns an error. +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. // API name: master_timeout func (r *Usage) MasterTimeout(duration string) *Usage { r.values.Set("master_timeout", duration)